diff --git a/.buildinfo b/.buildinfo
deleted file mode 100644
index 4a3d13d37..000000000
--- a/.buildinfo
+++ /dev/null
@@ -1,4 +0,0 @@
-# Sphinx build info version 1
-# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 296f70c81eb5b1a19f49cc58fe310b2a
-tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/_downloads/6cdf6df1cdd6ff82115103c53eafc77b/moviepy_10_minutes.zip b/_downloads/6cdf6df1cdd6ff82115103c53eafc77b/moviepy_10_minutes.zip
deleted file mode 100644
index 1ded32690..000000000
Binary files a/_downloads/6cdf6df1cdd6ff82115103c53eafc77b/moviepy_10_minutes.zip and /dev/null differ
diff --git a/_images/accel_decel-fx-params.png b/_images/accel_decel-fx-params.png
deleted file mode 100644
index cf9dbf9ef..000000000
Binary files a/_images/accel_decel-fx-params.png and /dev/null differ
diff --git a/_images/index_api.svg b/_images/index_api.svg
deleted file mode 100644
index 69f7ba1d2..000000000
--- a/_images/index_api.svg
+++ /dev/null
@@ -1,97 +0,0 @@
(deleted SVG markup, 97 lines)
diff --git a/_images/index_contribute.svg b/_images/index_contribute.svg
deleted file mode 100644
index de3d90237..000000000
--- a/_images/index_contribute.svg
+++ /dev/null
@@ -1,76 +0,0 @@
(deleted SVG markup, 76 lines)
diff --git a/_images/index_getting_started.svg b/_images/index_getting_started.svg
deleted file mode 100644
index 2d36622cb..000000000
--- a/_images/index_getting_started.svg
+++ /dev/null
@@ -1,66 +0,0 @@
(deleted SVG markup, 66 lines)
diff --git a/_images/index_user_guide.svg b/_images/index_user_guide.svg
deleted file mode 100644
index bd1705351..000000000
--- a/_images/index_user_guide.svg
+++ /dev/null
@@ -1,67 +0,0 @@
(deleted SVG markup, 67 lines)
diff --git a/_modules/index.html b/_modules/index.html
deleted file mode 100644
index ffb526a4c..000000000
--- a/_modules/index.html
+++ /dev/null
@@ -1,508 +0,0 @@
Overview: module code — MoviePy documentation
All modules for which code is available

\ No newline at end of file
diff --git a/_modules/moviepy/Clip.html b/_modules/moviepy/Clip.html
deleted file mode 100644
index e6f3efce3..000000000
--- a/_modules/moviepy/Clip.html
+++ /dev/null
@@ -1,1123 +0,0 @@
Source code for moviepy.Clip

-"""Implements the central object of MoviePy, the Clip, and all the methods that
-are common to the two subclasses of Clip, VideoClip and AudioClip.
-"""
-
-import copy as _copy
-from functools import reduce
-from numbers import Real
-from operator import add
-from typing import TYPE_CHECKING, List
-
-import numpy as np
-import proglog
-
-
-if TYPE_CHECKING:
-    from moviepy.Effect import Effect
-
-from moviepy.decorators import (
-    apply_to_audio,
-    apply_to_mask,
-    convert_parameter_to_seconds,
-    outplace,
-    requires_duration,
-    use_clip_fps_by_default,
-)
-
-
-
[docs]class Clip: - """Base class of all clips (VideoClips and AudioClips). - - Attributes - ---------- - - start : float - When the clip is included in a composition, time of the - composition at which the clip starts playing (in seconds). - - end : float - When the clip is included in a composition, time of the - composition at which the clip stops playing (in seconds). - - duration : float - Duration of the clip (in seconds). Some clips are infinite, in - this case their duration will be ``None``. - """ - - # prefix for all temporary video and audio files. - # You can overwrite it with - # >>> Clip._TEMP_FILES_PREFIX = "temp_" - - _TEMP_FILES_PREFIX = "TEMP_MPY_" - - def __init__(self): - self.start = 0 - self.end = None - self.duration = None - - self.memoize = False - self.memoized_t = None - self.memoized_frame = None - -
[docs] def copy(self): - """Allows the use of ``.copy()`` on clips in chained method invocations.""" - return _copy.copy(self)
- -
[docs] @convert_parameter_to_seconds(["t"]) - def get_frame(self, t): - """Gets a numpy array representing the RGB picture of the clip, - or (mono or stereo) value for a sound clip, at time ``t``. - - Parameters - ---------- - - t : float or tuple or str - Moment of the clip whose frame will be returned. - """ - # Coming soon: smart error handling for debugging at this point - if self.memoize: - if t == self.memoized_t: - return self.memoized_frame - else: - frame = self.make_frame(t) - self.memoized_t = t - self.memoized_frame = frame - return frame - else: - return self.make_frame(t)
- -
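A quick illustrative sketch of the memoization behavior documented above; the clip and file names are hypothetical:

>>> from moviepy import VideoFileClip
>>> myclip = VideoFileClip("myvideo.mp4").with_memoize(True)
>>> frame_a = myclip.get_frame(1.5)  # frame is computed and memoized
>>> frame_b = myclip.get_frame(1.5)  # same t: returned from the memoized frame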
[docs] def transform(self, func, apply_to=None, keep_duration=True): - """General processing of a clip. - - Returns a new Clip whose frames are a transformation - (through function ``func``) of the frames of the current clip. - - Parameters - ---------- - - func : function - A function with signature (gf,t -> frame) where ``gf`` will - represent the current clip's ``get_frame`` method, - i.e. ``gf`` is a function (t->image). Parameter `t` is a time - in seconds, `frame` is a picture (=Numpy array) which will be - returned by the transformed clip (see examples below). - - apply_to : {"mask", "audio", ["mask", "audio"]}, optional - Can be either ``'mask'``, or ``'audio'``, or - ``['mask','audio']``. - Specifies if the filter should also be applied to the - audio or the mask of the clip, if any. - - keep_duration : bool, optional - Set to True if the transformation does not change the - ``duration`` of the clip. - - Examples - -------- - - In the following, ``new_clip`` is a 100-pixel-high clip whose video - content scrolls from the top to the bottom of the frames of - ``clip`` at 50 pixels per second. - - >>> filter = lambda get_frame,t : get_frame(t)[int(50*t):int(50*t)+50, :] - >>> new_clip = clip.transform(filter, apply_to='mask') - - """ - if apply_to is None: - apply_to = [] - - # mf = copy(self.make_frame) - new_clip = self.with_make_frame(lambda t: func(self.get_frame, t)) - - if not keep_duration: - new_clip.duration = None - new_clip.end = None - - if isinstance(apply_to, str): - apply_to = [apply_to] - - for attribute in apply_to: - attribute_value = getattr(new_clip, attribute, None) - if attribute_value is not None: - new_attribute_value = attribute_value.transform( - func, keep_duration=keep_duration - ) - setattr(new_clip, attribute, new_attribute_value) - - return new_clip
- -
[docs] def time_transform(self, time_func, apply_to=None, keep_duration=False): - """ - Returns a Clip instance playing the content of the current clip - but with a modified timeline, time ``t`` being replaced by the return - of `time_func(t)`. - - Parameters - ---------- - - time_func : function - A function ``t -> new_t``. - - apply_to : {"mask", "audio", ["mask", "audio"]}, optional - Can be either 'mask', or 'audio', or ['mask','audio']. - Specifies if the filter ``transform`` should also be applied to the - audio or the mask of the clip, if any. - - keep_duration : bool, optional - ``False`` (default) if the transformation modifies the - ``duration`` of the clip. - - Examples - -------- - - >>> # plays the clip (and its mask and sound) twice faster - >>> new_clip = clip.time_transform(lambda t: 2*t, apply_to=['mask', 'audio']) - >>> - >>> # plays the clip starting at t=3, and backwards: - >>> new_clip = clip.time_transform(lambda t: 3-t) - - """ - if apply_to is None: - apply_to = [] - - return self.transform( - lambda get_frame, t: get_frame(time_func(t)), - apply_to, - keep_duration=keep_duration, - )
- -
[docs] def with_effects(self, effects: List["Effect"]): - """Return a copy of the current clip with the effects applied. - - >>> new_clip = clip.with_effects([vfx.Resize(0.2, method="bilinear")]) - - You can also pass several effects in the list - - >>> clip.with_effects([afx.VolumeX(0.5), vfx.Resize(0.3), vfx.Mirrorx()]) - """ - new_clip = self.copy() - for effect in effects: - # We always copy the effect before using it, see Effect.copy - # to see why we need to - effect_copy = effect.copy() - new_clip = effect_copy.apply(new_clip) - - return new_clip
- -
[docs] @apply_to_mask - @apply_to_audio - @convert_parameter_to_seconds(["t"]) - @outplace - def with_start(self, t, change_end=True): - """Returns a copy of the clip, with the ``start`` attribute set - to ``t``, which can be expressed in seconds (15.35), in (min, sec), - in (hour, min, sec), or as a string: '01:03:05.35'. - - These changes are also applied to the ``audio`` and ``mask`` - clips of the current clip, if they exist. - - Parameters - ---------- - - t : float or tuple or str - New ``start`` attribute value for the clip. - - change_end : bool, optional - Indicates if the ``end`` attribute value must be changed accordingly, - if possible. If ``change_end=True`` and the clip has a ``duration`` - attribute, the ``end`` attribute of the clip will be updated to - ``start + duration``. If ``change_end=False`` and the clip has an - ``end`` attribute, the ``duration`` attribute of the clip will be - updated to ``end - start``. - """ - self.start = t - if (self.duration is not None) and change_end: - self.end = t + self.duration - elif self.end is not None: - self.duration = self.end - self.start
- -
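As a hedged illustration of the two ``change_end`` modes, assuming a hypothetical clip with ``start=0`` and ``end=10``:

>>> moved = clip.with_start(5)                      # start=5, end=15 (duration kept)
>>> trimmed = clip.with_start(5, change_end=False)  # start=5, end=10, duration=5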
[docs] @apply_to_mask - @apply_to_audio - @convert_parameter_to_seconds(["t"]) - @outplace - def with_end(self, t): - """Returns a copy of the clip, with the ``end`` attribute set to ``t``, - which can be expressed in seconds (15.35), in (min, sec), in - (hour, min, sec), or as a string: '01:03:05.35'. Also sets the duration - of the mask and audio, if any, of the returned clip. - - Parameters - ---------- - - t : float or tuple or str - New ``end`` attribute value for the clip. - """ - self.end = t - if self.end is None: - return - if self.start is None: - if self.duration is not None: - self.start = max(0, t - self.duration) - else: - self.duration = self.end - self.start
- -
[docs] @apply_to_mask - @apply_to_audio - @convert_parameter_to_seconds(["duration"]) - @outplace - def with_duration(self, duration, change_end=True): - """Returns a copy of the clip, with the ``duration`` attribute set to - ``duration``, which can be expressed in seconds (15.35), in (min, sec), in - (hour, min, sec), or as a string: '01:03:05.35'. Also sets the duration - of the mask and audio, if any, of the returned clip. - - If ``change_end is False``, the ``start`` attribute of the clip will be - modified based on the new duration and the preset end of the clip. - - Parameters - ---------- - - duration : float - New duration attribute value for the clip. - - change_end : bool, optional - If ``True``, the ``end`` attribute value of the clip will be adjusted - according to the new duration, using ``clip.start + duration``. - """ - self.duration = duration - - if change_end: - self.end = None if (duration is None) else (self.start + duration) - else: - if self.duration is None: - raise ValueError("Cannot change clip start when new duration is None") - self.start = self.end - duration
- -
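A small sketch of both branches, assuming a hypothetical clip with ``start=0`` and ``end=10``:

>>> longer = clip.with_duration(12)                    # end becomes start + 12 = 12
>>> shifted = clip.with_duration(4, change_end=False)  # start becomes end - 4 = 6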
[docs] @outplace - def with_make_frame(self, make_frame): - """Sets a ``make_frame`` attribute for the clip. Useful for setting - arbitrary/complicated videoclips. - - Parameters - ---------- - - make_frame : function - New frame creator function for the clip. - """ - self.make_frame = make_frame
- -
[docs] def with_fps(self, fps, change_duration=False): - """Returns a copy of the clip with a new default fps for functions like - write_videofile, iter_frames, etc. - - Parameters - ---------- - - fps : int - New ``fps`` attribute value for the clip. - - change_duration : bool, optional - If ``change_duration=True``, then the video speed will change to - match the new fps (conserving all frames 1:1). For example, if the - fps is halved in this mode, the duration will be doubled. - """ - if change_duration: - from moviepy.video.fx.MultiplySpeed import MultiplySpeed - - newclip = self.with_effects([MultiplySpeed(fps / self.fps)]) - else: - newclip = self.copy() - - newclip.fps = fps - return newclip
- -
[docs] @outplace - def with_is_mask(self, is_mask): - """Says whether the clip is a mask or not. - - Parameters - ---------- - - is_mask : bool - New ``is_mask`` attribute value for the clip. - """ - self.is_mask = is_mask
- -
[docs] @outplace - def with_memoize(self, memoize): - """Sets whether the clip should keep the last frame read in memory. - - Parameters - ---------- - - memoize : bool - Indicates if the clip should keep the last frame read in memory. - """ - self.memoize = memoize
- -
[docs] @convert_parameter_to_seconds(["start_time", "end_time"]) - @apply_to_mask - @apply_to_audio - def with_subclip(self, start_time=0, end_time=None): - """Returns a clip playing the content of the current clip between times - ``start_time`` and ``end_time``, which can be expressed in seconds - (15.35), in (min, sec), in (hour, min, sec), or as a string: - '01:03:05.35'. - - The ``mask`` and ``audio`` of the resulting subclip will be subclips of - the ``mask`` and ``audio`` of the original clip, if they exist. - - It's equivalent to slicing the clip as a sequence, like - ``clip[t_start:t_end]``. - - Parameters - ---------- - - start_time : float or tuple or str, optional - Moment that will be chosen as the beginning of the produced clip. If - negative, it is reset to ``clip.duration + start_time``. - - end_time : float or tuple or str, optional - Moment that will be chosen as the end of the produced clip. If not - provided, it is assumed to be the duration of the clip (potentially - infinite). If negative, it is reset to ``clip.duration + end_time``. - For instance: - - >>> # cut the last two seconds of the clip: - >>> new_clip = clip.with_subclip(0, -2) - - If ``end_time`` is provided or if the clip has a duration attribute, - the duration of the returned clip is set automatically. - """ - if start_time < 0: - # Make this more Python-like, a negative value means to move - # backward from the end of the clip - start_time = self.duration + start_time # Remember start_time is negative - - if (self.duration is not None) and (start_time >= self.duration): - raise ValueError( - "start_time (%.02f) " % start_time - + "should be smaller than the clip's " - + "duration (%.02f)." % self.duration - ) - - new_clip = self.time_transform(lambda t: t + start_time, apply_to=[]) - - if (end_time is None) and (self.duration is not None): - end_time = self.duration - - elif (end_time is not None) and (end_time < 0): - if self.duration is None: - raise ValueError( - ( - "Subclip with negative times (here %s)" - " can only be extracted from clips with a ``duration``" - ) - % (str((start_time, end_time))) - ) - - else: - end_time = self.duration + end_time - - if end_time is not None: - new_clip.duration = end_time - start_time - new_clip.end = new_clip.start + new_clip.duration - - return new_clip
- -
[docs] @convert_parameter_to_seconds(["start_time", "end_time"]) - def with_cutout(self, start_time, end_time): - """ - Returns a clip playing the content of the current clip but - skipping the segment between ``start_time`` and ``end_time``, which can be - expressed in seconds (15.35), in (min, sec), in (hour, min, sec), - or as a string: '01:03:05.35'. - - If the original clip has a ``duration`` attribute set, - the duration of the returned clip is automatically computed as - ``duration - (end_time - start_time)``. - - The resulting clip's ``audio`` and ``mask`` will also be cut out - if they exist. - - Parameters - ---------- - - start_time : float or tuple or str - Moment from which frames will be ignored in the resulting output. - - end_time : float or tuple or str - Moment until which frames will be ignored in the resulting output. - """ - new_clip = self.time_transform( - lambda t: t + (t >= start_time) * (end_time - start_time), - apply_to=["audio", "mask"], - ) - - if self.duration is not None: - return new_clip.with_duration(self.duration - (end_time - start_time)) - else: # pragma: no cover - return new_clip
- -
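For illustration, a minimal sketch assuming a hypothetical 10-second clip:

>>> # drop the segment between t=3 and t=5; the result lasts 8 seconds
>>> shorter = clip.with_cutout(3, 5)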
[docs] def with_multiply_speed(self, factor: float = None, final_duration: float = None): - """Returns a clip playing the current clip but at a speed multiplied - by ``factor``. For info on the parameters, please see ``vfx.MultiplySpeed``. - """ - from moviepy.video.fx.MultiplySpeed import MultiplySpeed - - return self.with_effects( - [MultiplySpeed(factor=factor, final_duration=final_duration)] - )
- -
[docs] def with_multiply_volume(self, factor: float, start_time=None, end_time=None): - """Returns a new clip with audio volume multiplied by the value `factor`. - For info on the parameters, please see ``afx.MultiplyVolume`` - """ - from moviepy.audio.fx.MultiplyVolume import MultiplyVolume - - return self.with_effects( - [MultiplyVolume(factor=factor, start_time=start_time, end_time=end_time)] - )
- -
[docs] @requires_duration - @use_clip_fps_by_default - def iter_frames(self, fps=None, with_times=False, logger=None, dtype=None): - """Iterates over all the frames of the clip. - - Returns each frame of the clip as a HxWxN Numpy array, - where N=1 for mask clips and N=3 for RGB clips. - - This function is not really meant for video editing. It provides an - easy way to do frame-by-frame treatment of a video, for fields like - science, computer vision... - - Parameters - ---------- - - fps : int, optional - Frames per second for clip iteration. Is optional if the clip already - has a ``fps`` attribute. - - with_times : bool, optional - If ``True``, yield tuples of ``(t, frame)`` where ``t`` is the current - time for the frame, otherwise only a ``frame`` object. - - logger : str, optional - Either ``"bar"`` for progress bar or ``None`` or any Proglog logger. - - dtype : type, optional - Type to cast Numpy array frames. Use ``dtype="uint8"`` when using the - pictures to write video, images... - - Examples - -------- - - >>> # prints the maximum of red that is contained - >>> # on the first line of each frame of the clip. - >>> from moviepy import VideoFileClip - >>> myclip = VideoFileClip('myvideo.mp4') - >>> print ( [frame[0,:,0].max() - for frame in myclip.iter_frames()]) - """ - logger = proglog.default_bar_logger(logger) - for frame_index in logger.iter_bar( - frame_index=np.arange(0, int(self.duration * fps)) - ): - # int is used to ensure that floating point errors are rounded - # down to the nearest integer - t = frame_index / fps - - frame = self.get_frame(t) - if (dtype is not None) and (frame.dtype != dtype): - frame = frame.astype(dtype) - if with_times: - yield t, frame - else: - yield frame
- -
[docs] @convert_parameter_to_seconds(["t"]) - def is_playing(self, t): - """If ``t`` is a time, returns ``True`` if t is between the start and the end - of the clip. ``t`` can be expressed in seconds (15.35), in (min, sec), in - (hour, min, sec), or as a string: '01:03:05.35'. If ``t`` is a numpy - array, returns ``False`` if none of the ``t`` is in the clip, else returns a - vector [b_1, b_2, b_3...] where b_i is true if t_i is in the clip. - """ - if isinstance(t, np.ndarray): - # is the whole list of t outside the clip ? - tmin, tmax = t.min(), t.max() - - if (self.end is not None) and (tmin >= self.end): - return False - - if tmax < self.start: - return False - - # If we arrive here, a part of t falls in the clip - result = 1 * (t >= self.start) - if self.end is not None: - result *= t <= self.end - return result - - else: - return (t >= self.start) and ((self.end is None) or (t < self.end))
- -
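A short sketch of both call styles, assuming a hypothetical clip placed between t=2 and t=6 in a composition:

>>> import numpy as np
>>> clip.is_playing(4)                          # True
>>> clip.is_playing(np.array([1.0, 3.0, 7.0]))  # array([0, 1, 0])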
[docs] def close(self): - """Release any resources that are in use.""" - # Implementation note for subclasses: - # - # * Memory-based resources can be left to the garbage-collector. - # * However, any open files should be closed, and subprocesses - # should be terminated. - # * Be wary that shallow copies are frequently used. - # Closing a Clip may affect its copies. - # * Therefore, should NOT be called by __del__(). - pass
- - def __eq__(self, other): - if not isinstance(other, Clip): - return NotImplemented - - # Make sure that the total number of frames is the same - self_length = self.duration * self.fps - other_length = other.duration * other.fps - if self_length != other_length: - return False - - # Make sure that each frame is the same - for frame1, frame2 in zip(self.iter_frames(), other.iter_frames()): - if not np.array_equal(frame1, frame2): - return False - - return True - - def __enter__(self): - """ - Support the Context Manager protocol, - to ensure that resources are cleaned up. - """ - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - - def __getitem__(self, key): - """ - Support extended slice and index operations over - a clip object. - - Simple slicing is implemented via ``with_subclip``. - So, ``clip[t_start:t_end]`` is equivalent to - ``clip.with_subclip(t_start, t_end)``. If ``t_start`` is not - given, it defaults to ``0``; if ``t_end`` is not given, - it defaults to ``self.duration``. - - The slice object optionally supports a third argument as - a ``speed`` coefficient (that could be negative), - ``clip[t_start:t_end:speed]``. - - For example ``clip[::-1]`` returns the video reversed (a time_mirror - effect) and ``clip[:5:2]`` returns the segment from 0 to 5s - accelerated to 2x (i.e. the resulting duration would be 2.5s). - - In addition, a tuple of slices is supported, resulting in the concatenation - of each segment. For example ``clip[(:1, 2:)]`` returns a clip - with the segment from 1 to 2s removed. - - If ``key`` is not a slice or tuple, we assume it's a time - value (expressed in any format supported by `cvsec`) - and return the frame at that time, passing the key - to ``get_frame``. - """ - apply_to = ["mask", "audio"] - if isinstance(key, slice): - # support for [start:end:speed] slicing. If speed is negative - # a time mirror is applied. - clip = self.with_subclip(key.start or 0, key.stop or self.duration) - - if key.step: - # change speed of the subclip - factor = abs(key.step) - if factor != 1: - # change speed - clip = clip.time_transform( - lambda t: factor * t, apply_to=apply_to, keep_duration=True - ) - clip = clip.with_duration(1.0 * clip.duration / factor) - if key.step < 0: - # time mirror - clip = clip.time_transform( - lambda t: clip.duration - t - 1, - keep_duration=True, - apply_to=apply_to, - ) - return clip - elif isinstance(key, tuple): - # get a concatenation of subclips - return reduce(add, (self[k] for k in key)) - else: - return self.get_frame(key) - - def __del__(self): - # WARNING: as stated in close() above, if we call close, it closes clips - # even if shallow copies are still in use, leading to some bugs, see: - # https://github.com/Zulko/moviepy/issues/1994 - # so don't call self.close() here, rather do it manually in the code. - pass - - def __add__(self, other): - # concatenate. implemented in specialized classes - return NotImplemented - - def __mul__(self, n): - # loop n times, where n is a real number - if not isinstance(n, Real): - return NotImplemented - - from moviepy.video.fx.Loop import Loop - - return self.with_effects([Loop(n)])
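To make the slicing semantics above concrete, a hedged sketch on a hypothetical ``clip``:

>>> first_five = clip[:5]       # same as clip.with_subclip(0, 5)
>>> reversed_clip = clip[::-1]  # whole clip, time-mirrored
>>> fast = clip[:5:2]           # first 5 s played at 2x (2.5 s long)
>>> tripled = clip * 3          # loops the clip three times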
\ No newline at end of file
diff --git a/_modules/moviepy/Effect.html b/_modules/moviepy/Effect.html
deleted file mode 100644
index d940fa9f6..000000000
--- a/_modules/moviepy/Effect.html
+++ /dev/null
@@ -1,486 +0,0 @@
Source code for moviepy.Effect

-"""Defines the base class for all effects in MoviePy."""
-
-import copy as _copy
-from abc import ABCMeta, abstractmethod
-
-from moviepy.Clip import Clip
-
-
-
[docs]class Effect(metaclass=ABCMeta): - """Base abstract class for all effects in MoviePy. - Any new effect has to extend this base class. - """ -
[docs] def copy(self): - """Return a shallow copy of an Effect. - - You must *always* copy an ``Effect`` before applying, - because some of them will modify their own attributes when applied. - For example, an effect may set a previously unset property from a - property of the target clip. - - If we were to use the original effect, calling the same effect multiple times - could lead to different properties, and different results for equivalent clips. - - By using copy, we ensure we can use the same effect object multiple times while - maintaining the same behavior/result. - - In a way, copying makes the effect itself effectively idempotent. - """ - return _copy.copy(self)
- -
[docs] @abstractmethod - def apply(self, clip: Clip) -> Clip: - """Apply the current effect on a clip - - Parameters - ---------- - clip - The target clip to apply the effect on. - (Internally, MoviePy will always pass a copy of the original clip) - - """ - pass
-
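To show how the ``copy``/``apply`` contract is meant to be used, here is a minimal sketch of a custom effect; the class name and behavior are invented for illustration and are not part of MoviePy:

from dataclasses import dataclass

from moviepy.Clip import Clip
from moviepy.Effect import Effect


@dataclass
class FreezeFirstFrame(Effect):  # hypothetical example effect
    """Replace every frame with the frame at t=0."""

    def apply(self, clip: Clip) -> Clip:
        # transform passes (get_frame, t); we ignore t and always read t=0
        return clip.transform(lambda get_frame, t: get_frame(0))


# usage: with_effects copies the effect internally before calling apply()
# frozen = some_clip.with_effects([FreezeFirstFrame()])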
\ No newline at end of file
diff --git a/_modules/moviepy/audio/AudioClip.html b/_modules/moviepy/audio/AudioClip.html
deleted file mode 100644
index d7ea678e6..000000000
--- a/_modules/moviepy/audio/AudioClip.html
+++ /dev/null
@@ -1,879 +0,0 @@
Source code for moviepy.audio.AudioClip

-"""Implements AudioClip (base class for audio clips) and its main subclasses:
-
-- Audio clips: AudioClip, AudioFileClip, AudioArrayClip
-- Composition: CompositeAudioClip
-"""
-
-import numbers
-import os
-
-import numpy as np
-import proglog
-
-from moviepy.audio.io.ffmpeg_audiowriter import ffmpeg_audiowrite
-from moviepy.audio.io.ffplay_audiopreviewer import ffplay_audiopreview
-from moviepy.Clip import Clip
-from moviepy.decorators import convert_path_to_string, requires_duration
-from moviepy.tools import extensions_dict
-
-
-
[docs]class AudioClip(Clip): - """Base class for audio clips. - - See ``AudioFileClip`` and ``CompositeAudioClip`` for usable classes. - - An AudioClip is a Clip with a ``make_frame`` attribute of - the form `` t -> [ f_t ]`` for mono sound and - ``t-> [ f1_t, f2_t ]`` for stereo sound (the arrays are Numpy arrays). - The `f_t` are floats between -1 and 1. These bounds can be - exceeded without problems (the program will put the - sound back into the bounds at conversion time, without much impact). - - Parameters - ---------- - - make_frame - A function `t-> frame at time t`. The frame does not mean much - for a sound, it is just a float. What 'makes' the sound are - the variations of that float over time. - - duration - Duration of the clip (in seconds). Some clips are infinite, in - this case their duration will be ``None``. - - nchannels - Number of channels (one or two for mono or stereo). - - Examples - -------- - - >>> # Plays the note A in mono (a sine wave of frequency 440 Hz) - >>> import numpy as np - >>> make_frame = lambda t: np.sin(440 * 2 * np.pi * t) - >>> clip = AudioClip(make_frame, duration=5, fps=44100) - >>> clip.preview() - - >>> # Plays the note A in stereo (two sine waves of frequencies 440 and 880 Hz) - >>> make_frame = lambda t: np.array([ - ... np.sin(440 * 2 * np.pi * t), - ... np.sin(880 * 2 * np.pi * t) - ... ]).T.copy(order="C") - >>> clip = AudioClip(make_frame, duration=3, fps=44100) - >>> clip.preview() - - """ - - def __init__(self, make_frame=None, duration=None, fps=None): - super().__init__() - - if fps is not None: - self.fps = fps - - if make_frame is not None: - self.make_frame = make_frame - frame0 = self.get_frame(0) - if hasattr(frame0, "__iter__"): - self.nchannels = len(list(frame0)) - else: - self.nchannels = 1 - if duration is not None: - self.duration = duration - self.end = duration -
[docs] @requires_duration - def iter_chunks( - self, - chunksize=None, - chunk_duration=None, - fps=None, - quantize=False, - nbytes=2, - logger=None, - ): - """Iterator that returns the whole sound array of the clip by chunks""" - if fps is None: - fps = self.fps - logger = proglog.default_bar_logger(logger) - if chunk_duration is not None: - chunksize = int(chunk_duration * fps) - - total_size = int(fps * self.duration) - - nchunks = total_size // chunksize + 1 - - positions = np.linspace(0, total_size, nchunks + 1, endpoint=True, dtype=int) - - for i in logger.iter_bar(chunk=list(range(nchunks))): - size = positions[i + 1] - positions[i] - assert size <= chunksize - timings = (1.0 / fps) * np.arange(positions[i], positions[i + 1]) - yield self.to_soundarray( - timings, nbytes=nbytes, quantize=quantize, fps=fps, buffersize=chunksize - )
- -
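A small usage sketch: scanning an audio clip chunk by chunk keeps memory bounded; the variable names are hypothetical:

>>> peak = 0
>>> for chunk in audio_clip.iter_chunks(chunksize=50000):
...     peak = max(peak, abs(chunk).max())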
[docs] @requires_duration - def to_soundarray( - self, tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000 - ): - """ - Transforms the sound into an array that can be played by pygame - or written in a wav file. See ``AudioClip.preview``. - - Parameters - ---------- - - fps - Frame rate of the sound for the conversion. - 44100 for top quality. - - nbytes - Number of bytes to encode the sound: 1 for 8bit sound, - 2 for 16bit, 4 for 32bit sound. - - """ - if tt is None: - if fps is None: - fps = self.fps - - max_duration = 1 * buffersize / fps - if self.duration > max_duration: - stacker = np.vstack if self.nchannels == 2 else np.hstack - return stacker( - tuple( - self.iter_chunks( - fps=fps, quantize=quantize, nbytes=2, chunksize=buffersize - ) - ) - ) - else: - tt = np.arange(0, self.duration, 1.0 / fps) - """ - elif len(tt)> 1.5*buffersize: - nchunks = int(len(tt)/buffersize+1) - tt_chunks = np.array_split(tt, nchunks) - return stacker([self.to_soundarray(tt=ttc, buffersize=buffersize, fps=fps, - quantize=quantize, nbytes=nbytes) - for ttc in tt_chunks]) - """ - snd_array = self.get_frame(tt) - - if quantize: - snd_array = np.maximum(-0.99, np.minimum(0.99, snd_array)) - inttype = {1: "int8", 2: "int16", 4: "int32"}[nbytes] - snd_array = (2 ** (8 * nbytes - 1) * snd_array).astype(inttype) - - return snd_array
- -
[docs] def max_volume(self, stereo=False, chunksize=50000, logger=None): - """Returns the maximum volume level of the clip.""" - # max volume separated by channels if ``stereo`` and not mono - stereo = stereo and self.nchannels > 1 - - # zero for each channel - maxi = np.zeros(self.nchannels) - for chunk in self.iter_chunks(chunksize=chunksize, logger=logger): - maxi = np.maximum(maxi, abs(chunk).max(axis=0)) - - # if mono returns float, otherwise array of volumes by channel - return maxi if stereo else maxi[0]
- -
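For illustration (the file name is hypothetical):

>>> music = AudioFileClip("music.ogg")
>>> music.max_volume()             # a single float covering all channels
>>> music.max_volume(stereo=True)  # one value per channel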
[docs] @requires_duration - @convert_path_to_string("filename") - def write_audiofile( - self, - filename, - fps=None, - nbytes=2, - buffersize=2000, - codec=None, - bitrate=None, - ffmpeg_params=None, - write_logfile=False, - logger="bar", - ): - """Writes an audio file from the AudioClip. - - - Parameters - ---------- - - filename - Name of the output file, as a string or a path-like object. - - fps - Frames per second. If not set, it will default to ``self.fps`` if - that is set, otherwise it will default to 44100. - - nbytes - Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound) - - buffersize - The sound is not generated all at once, but rather made by bunches - of frames (chunks). ``buffersize`` is the size of such a chunk. - Try varying it if you meet audio problems (but you shouldn't - have to). Defaults to 2000. - - codec - Which audio codec should be used. If None is provided, the codec is - determined based on the extension of the filename. Choose - 'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav. - - bitrate - Audio bitrate, given as a string like '50k', '500k', '3000k'. - Will determine the size and quality of the output file. - Note that it is mainly an indicative goal; the bitrate won't - necessarily be matched exactly in the output file. - - ffmpeg_params - Any additional parameters you would like to pass, as a list - of terms, like ['-option1', 'value1', '-option2', 'value2'] - - write_logfile - If True, produces a detailed logfile named filename + '.log' - when writing the file - - logger - Either ``"bar"`` for progress bar or ``None`` or any Proglog logger. - - """ - if not fps: - if not self.fps: - fps = 44100 - else: - fps = self.fps - - if codec is None: - name, ext = os.path.splitext(os.path.basename(filename)) - try: - codec = extensions_dict[ext[1:]]["codec"][0] - except KeyError: - raise ValueError( - "MoviePy couldn't find the codec associated " - "with the filename. Provide the 'codec' " - "parameter in write_audiofile." - ) - - return ffmpeg_audiowrite( - self, - filename, - fps, - nbytes, - buffersize, - codec=codec, - bitrate=bitrate, - write_logfile=write_logfile, - ffmpeg_params=ffmpeg_params, - logger=logger, - )
- -
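A hedged sketch of both codec paths (the file names are hypothetical):

>>> import numpy as np
>>> tone = AudioClip(lambda t: np.sin(440 * 2 * np.pi * t), duration=2, fps=44100)
>>> tone.write_audiofile("tone.mp3")                     # codec inferred from extension
>>> tone.write_audiofile("tone.wav", codec="pcm_s16le")  # explicit 16-bit wav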
[docs] @requires_duration - def audiopreview( - self, fps=None, buffersize=2000, nbytes=2, audio_flag=None, video_flag=None - ): - """ - Preview an AudioClip using ffplay - - Parameters - ---------- - - fps - Frame rate of the sound. 44100 gives top quality, but may cause - problems if your computer is not fast enough and your clip is - complicated. If the sound jumps during the preview, lower it - (11025 is still fine, 5000 is tolerable). - - buffersize - The sound is not generated all at once, but rather made by bunches - of frames (chunks). ``buffersize`` is the size of such a chunk. - Try varying it if you meet audio problems (but you shouldn't - have to). - - nbytes: - Number of bytes to encode the sound: 1 for 8bit sound, 2 for - 16bit, 4 for 32bit sound. 2 bytes is fine. - - audio_flag, video_flag: - Instances of class threading events that are used to synchronize - video and audio during ``VideoClip.preview()``. - """ - ffplay_audiopreview( - clip=self, - fps=fps, - buffersize=buffersize, - nbytes=nbytes, - audio_flag=audio_flag, - video_flag=video_flag, - )
- - def __add__(self, other): - if isinstance(other, AudioClip): - return concatenate_audioclips([self, other]) - return super(AudioClip, self).__add__(other)
- - -
[docs]class AudioArrayClip(AudioClip): - """ - - An audio clip made from a sound array. - - Parameters - ---------- - - array - A Numpy array representing the sound, of size Nx1 for mono, - Nx2 for stereo. - - fps - Frames per second : speed at which the sound is supposed to be - played. - - """ - - def __init__(self, array, fps): - Clip.__init__(self) - self.array = array - self.fps = fps - self.duration = 1.0 * len(array) / fps - - def make_frame(t): - """Complicated, but must be able to handle the case where t - is a list of the form sin(t). - """ - if isinstance(t, np.ndarray): - array_inds = np.round(self.fps * t).astype(int) - in_array = (array_inds >= 0) & (array_inds < len(self.array)) - result = np.zeros((len(t), 2)) - result[in_array] = self.array[array_inds[in_array]] - return result - else: - i = int(self.fps * t) - if i < 0 or i >= len(self.array): - return 0 * self.array[0] - else: - return self.array[i] - - self.make_frame = make_frame - self.nchannels = len(list(self.get_frame(0)))
- - -
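For illustration, building a one-second mono clip from raw samples (the values are hypothetical):

>>> import numpy as np
>>> fps = 44100
>>> t = np.arange(0, 1, 1.0 / fps)
>>> samples = np.sin(440 * 2 * np.pi * t).reshape(-1, 1)  # Nx1 = mono
>>> clip = AudioArrayClip(samples, fps=fps)               # duration = 1.0 s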
[docs]class CompositeAudioClip(AudioClip): - """Clip made by composing several AudioClips. - - An audio clip made by putting together several audio clips. - - Parameters - ---------- - - clips - List of audio clips, which may start playing at different times or - together, depending on their ``start`` attributes. If all have their - ``duration`` attribute set, the duration of the composite clip is - computed automatically. - """ - - def __init__(self, clips): - self.clips = clips - self.nchannels = max(clip.nchannels for clip in self.clips) - - # self.duration is set at AudioClip - duration = None - for end in self.ends: - if end is None: - break - duration = max(end, duration or 0) - - # self.fps is set at AudioClip - fps = None - for clip in self.clips: - if hasattr(clip, "fps") and isinstance(clip.fps, numbers.Number): - fps = max(clip.fps, fps or 0) - - super().__init__(duration=duration, fps=fps) - - @property - def starts(self): - """Returns starting times for all clips in the composition.""" - return (clip.start for clip in self.clips) - - @property - def ends(self): - """Returns ending times for all clips in the composition.""" - return (clip.end for clip in self.clips) -
[docs] def make_frame(self, t): - """Renders a frame for the composition for the time ``t``.""" - played_parts = [clip.is_playing(t) for clip in self.clips] - - sounds = [ - clip.get_frame(t - clip.start) * np.array([part]).T - for clip, part in zip(self.clips, played_parts) - if (part is not False) - ] - - if isinstance(t, np.ndarray): - zero = np.zeros((len(t), self.nchannels)) - else: - zero = np.zeros(self.nchannels) - - return zero + sum(sounds)
- - -
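A short sketch of composition as an overlay (the file names are hypothetical):

>>> voice = AudioFileClip("voice.wav")
>>> music = AudioFileClip("music.ogg").with_start(2)  # enters 2 s in
>>> mix = CompositeAudioClip([voice, music])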
[docs]def concatenate_audioclips(clips): - """Concatenates one AudioClip after another, in the order they are passed - in the ``clips`` parameter. - - Parameters - ---------- - - clips - List of audio clips, which will be played one after another. - """ - # start, end/start2, end2/start3... end - starts_end = np.cumsum([0, *[clip.duration for clip in clips]]) - newclips = [clip.with_start(t) for clip, t in zip(clips, starts_end[:-1])] - - return CompositeAudioClip(newclips).with_duration(starts_end[-1])
-
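And for sequential playback rather than an overlay (the file names are hypothetical):

>>> intro = AudioFileClip("intro.wav")
>>> outro = AudioFileClip("outro.wav")
>>> combined = concatenate_audioclips([intro, outro])  # intro, then outro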
\ No newline at end of file
diff --git a/_modules/moviepy/audio/fx/AudioDelay.html b/_modules/moviepy/audio/fx/AudioDelay.html
deleted file mode 100644
index 67a6d739a..000000000
--- a/_modules/moviepy/audio/fx/AudioDelay.html
+++ /dev/null
@@ -1,512 +0,0 @@
Source code for moviepy.audio.fx.AudioDelay

-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.audio.AudioClip import CompositeAudioClip
-from moviepy.audio.fx.MultiplyVolume import MultiplyVolume
-from moviepy.Clip import Clip
-from moviepy.decorators import audio_video_effect
-from moviepy.Effect import Effect
-
-
-
[docs]@dataclass -class AudioDelay(Effect): - """Repeats audio a certain number of times at constant intervals, multiplying - the volume levels using a linear space in the range from 1 to the ``decay`` - argument value. - - Parameters - ---------- - - offset : float, optional - Gap between repetition start times, in seconds. - - n_repeats : int, optional - Number of repetitions (without including the clip itself). - - decay : float, optional - Multiplication factor for the volume level of the last repetition. Each - repetition will have a value in the linear function between 1 and this value, - increasing or decreasing constantly. Keep in mind that the last repetition - will be muted if this is 0, and if it is greater than 1, the volume will - increase for each repetition. - - Examples - -------- - - >>> from moviepy import * - >>> videoclip = AudioFileClip('myaudio.wav').with_effects([ - ... afx.AudioDelay(offset=.2, n_repeats=10, decay=.2) - ... ]) - - >>> # stereo A note - >>> make_frame = lambda t: np.array( - ... [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)] - ... ).T - ... clip = AudioClip(make_frame=make_frame, duration=0.1, fps=44100) - ... clip = clip.with_effects([afx.AudioDelay(offset=.2, n_repeats=11, decay=0)]) - """ - - offset: float = 0.2 - n_repeats: int = 8 - decay: float = 1 -
[docs] @audio_video_effect - def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - decayments = np.linspace(1, max(0, self.decay), self.n_repeats + 1) - return CompositeAudioClip( - [ - clip.copy(), - *[ - clip.with_start((rep + 1) * self.offset).with_effects( - [MultiplyVolume(decayments[rep + 1])] - ) - for rep in range(self.n_repeats) - ], - ] - )
\ No newline at end of file
diff --git a/_modules/moviepy/audio/fx/AudioFadeIn.html b/_modules/moviepy/audio/fx/AudioFadeIn.html
deleted file mode 100644
index 3ff4f2ed6..000000000
--- a/_modules/moviepy/audio/fx/AudioFadeIn.html
+++ /dev/null
@@ -1,502 +0,0 @@
Source code for moviepy.audio.fx.AudioFadeIn

-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.decorators import audio_video_effect
-from moviepy.Effect import Effect
-from moviepy.tools import convert_to_seconds
-
-
-
[docs]@dataclass -class AudioFadeIn(Effect): - """Return an audio (or video) clip that is mute at first, with the - sound then arriving progressively over ``duration`` seconds. - - Parameters - ---------- - - duration : float - How long it takes for the sound to reach its normal level. - - Examples - -------- - - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.with_effects([afx.AudioFadeIn("00:00:06")]) - """ - - duration: float - - def __post_init__(self): - self.duration = convert_to_seconds(self.duration) - - def _mono_factor_getter(self): - return lambda t, duration: np.minimum(t / duration, 1) - - def _stereo_factor_getter(self, nchannels): - def getter(t, duration): - factor = np.minimum(t / duration, 1) - return np.array([factor for _ in range(nchannels)]).T - - return getter -
[docs] @audio_video_effect - def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - if clip.duration is None: - raise ValueError("Attribute 'duration' not set") - - get_factor = ( - self._mono_factor_getter() - if clip.nchannels == 1 - else self._stereo_factor_getter(clip.nchannels) - ) - - return clip.transform( - lambda get_frame, t: get_factor(t, self.duration) * get_frame(t), - )
-
\ No newline at end of file
diff --git a/_modules/moviepy/audio/fx/AudioFadeOut.html b/_modules/moviepy/audio/fx/AudioFadeOut.html
deleted file mode 100644
index 0fc34e74d..000000000
--- a/_modules/moviepy/audio/fx/AudioFadeOut.html
+++ /dev/null
@@ -1,504 +0,0 @@
Source code for moviepy.audio.fx.AudioFadeOut

-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.decorators import audio_video_effect
-from moviepy.Effect import Effect
-from moviepy.tools import convert_to_seconds
-
-
-
[docs]@dataclass -class AudioFadeOut(Effect): - """Return a sound clip where the sound fades out progressively - over ``duration`` seconds at the end of the clip. - - Parameters - ---------- - - duration : float - How long does it take for the sound to reach the zero level at the end - of the clip. - - Examples - -------- - - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.with_effects([afx.AudioFadeOut("00:00:06")]) - """ - - duration: float - - def __post_init__(self): - self.duration = convert_to_seconds(self.duration) - - def _mono_factor_getter(self, clip_duration): - return lambda t, duration: np.minimum(1.0 * (clip_duration - t) / duration, 1) - - def _stereo_factor_getter(self, clip_duration, nchannels): - def getter(t, duration): - factor = np.minimum(1.0 * (clip_duration - t) / duration, 1) - return np.array([factor for _ in range(nchannels)]).T - - return getter - -
[docs] @audio_video_effect - def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - if clip.duration is None: - raise ValueError("Attribute 'duration' not set") - - get_factor = ( - self._mono_factor_getter(clip.duration) - if clip.nchannels == 1 - else self._stereo_factor_getter(clip.duration, clip.nchannels) - ) - - return clip.transform( - lambda get_frame, t: get_factor(t, self.duration) * get_frame(t), - keep_duration=True, - )
-
\ No newline at end of file
diff --git a/_modules/moviepy/audio/fx/AudioLoop.html b/_modules/moviepy/audio/fx/AudioLoop.html
deleted file mode 100644
index fdefdb3e0..000000000
--- a/_modules/moviepy/audio/fx/AudioLoop.html
+++ /dev/null
@@ -1,483 +0,0 @@
Source code for moviepy.audio.fx.AudioLoop

-from dataclasses import dataclass
-
-from moviepy.audio.AudioClip import concatenate_audioclips
-from moviepy.Clip import Clip
-from moviepy.decorators import audio_video_effect
-from moviepy.Effect import Effect
-
-
-
[docs]@dataclass -class AudioLoop(Effect): - """Loops over an audio clip. - - Returns an audio clip that plays the given clip either - `n_loops` times, or for `duration` seconds. - - Examples - -------- - - >>> from moviepy import * - >>> videoclip = VideoFileClip('myvideo.mp4') - >>> music = AudioFileClip('music.ogg') - >>> audio = music.with_effects([afx.AudioLoop(duration=videoclip.duration)]) - >>> videoclip.with_audio(audio) - - """ - - n_loops: int = None - duration: float = None -
[docs] @audio_video_effect - def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - if self.duration is not None: - self.n_loops = int(self.duration / clip.duration) + 1 - return concatenate_audioclips(self.n_loops * [clip]).with_duration( - self.duration - ) - - return concatenate_audioclips(self.n_loops * [clip])
-
\ No newline at end of file
diff --git a/_modules/moviepy/audio/fx/AudioNormalize.html b/_modules/moviepy/audio/fx/AudioNormalize.html
deleted file mode 100644
index a40fa5f65..000000000
--- a/_modules/moviepy/audio/fx/AudioNormalize.html
+++ /dev/null
@@ -1,475 +0,0 @@
Source code for moviepy.audio.fx.AudioNormalize

-from dataclasses import dataclass
-
-from moviepy.audio.fx.MultiplyVolume import MultiplyVolume
-from moviepy.Clip import Clip
-from moviepy.decorators import audio_video_effect
-from moviepy.Effect import Effect
-
-
-
[docs]@dataclass -class AudioNormalize(Effect): - """Return a clip whose volume is normalized to 0 dB. - - Return an audio (or video) clip whose audio volume is normalized - so that the maximum volume is at 0 dB, the maximum achievable volume. - - Examples - -------- - - >>> from moviepy import * - >>> videoclip = VideoFileClip('myvideo.mp4').with_effects([afx.AudioNormalize()]) - - """ -
[docs] @audio_video_effect - def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - max_volume = clip.max_volume() - if max_volume == 0: - return clip - else: - return clip.with_effects([MultiplyVolume(1 / max_volume)])
-
\ No newline at end of file
diff --git a/_modules/moviepy/audio/fx/MultiplyStereoVolume.html b/_modules/moviepy/audio/fx/MultiplyStereoVolume.html
deleted file mode 100644
index 162d3df85..000000000
--- a/_modules/moviepy/audio/fx/MultiplyStereoVolume.html
+++ /dev/null
@@ -1,486 +0,0 @@
Source code for moviepy.audio.fx.MultiplyStereoVolume

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.decorators import audio_video_effect
-from moviepy.Effect import Effect
-
-
-
[docs]@dataclass -class MultiplyStereoVolume(Effect): - """For a stereo audio clip, this effect makes it possible to change the volume - of the left and right channels separately (with the factors `left` - and `right`). Makes a stereo audio clip in which the volume of left - and right is controllable. - - Examples - -------- - - >>> from moviepy import AudioFileClip - >>> music = AudioFileClip('music.ogg') - >>> # mutes left channel - >>> audio_r = music.with_effects([afx.MultiplyStereoVolume(left=0, right=1)]) - >>> # halves audio volume - >>> audio_h = music.with_effects([afx.MultiplyStereoVolume(left=0.5, right=0.5)]) - """ - - left: float = 1 - right: float = 1 -
[docs] @audio_video_effect - def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - - def stereo_volume(get_frame, t): - frame = get_frame(t) - if len(frame) == 1: # mono - frame *= self.left if self.left is not None else self.right - else: # stereo, stereo surround... - for i in range(len(frame[0])): # even indices (0, 2, ...) are left channels - frame[:, i] *= self.left if i % 2 == 0 else self.right - return frame - - return clip.transform(stereo_volume)
-
\ No newline at end of file
diff --git a/_modules/moviepy/audio/fx/MultiplyVolume.html b/_modules/moviepy/audio/fx/MultiplyVolume.html
deleted file mode 100644
index 0e1d06f01..000000000
--- a/_modules/moviepy/audio/fx/MultiplyVolume.html
+++ /dev/null
@@ -1,532 +0,0 @@
Source code for moviepy.audio.fx.MultiplyVolume

-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.decorators import audio_video_effect
-from moviepy.Effect import Effect
-from moviepy.tools import convert_to_seconds
-
-
-
[docs]@dataclass -class MultiplyVolume(Effect): - """Returns a clip with audio volume multiplied by the - value `factor`. Can be applied to both audio and video clips. - - Parameters - ---------- - - factor : float - Volume multiplication factor. - - start_time : float, optional - Time from the beginning of the clip until the volume transformation - begins to take effect, in seconds. By default at the beginning. - - end_time : float, optional - Time from the beginning of the clip at which the volume transformation - stops taking effect, in seconds. By default at the end. - - Examples - -------- - - >>> from moviepy import AudioFileClip - >>> - >>> music = AudioFileClip("music.ogg") - >>> # doubles audio volume - >>> doubled_audio_clip = music.with_effects([afx.MultiplyVolume(2)]) - >>> # halves audio volume - >>> half_audio_clip = music.with_effects([afx.MultiplyVolume(0.5)]) - >>> # silences the clip for one second, from t=2 to t=3 - >>> effect = afx.MultiplyVolume(0, start_time=2, end_time=3) - >>> silenced_clip = clip.with_effects([effect]) - """ - - factor: float - start_time: float = None - end_time: float = None - - def __post_init__(self): - if self.start_time is not None: - self.start_time = convert_to_seconds(self.start_time) - - if self.end_time is not None: - self.end_time = convert_to_seconds(self.end_time) - - def _multiply_volume_in_range(self, factor, start_time, end_time, nchannels): - def factors_filter(factor, t): - return np.array([factor if start_time <= t_ <= end_time else 1 for t_ in t]) - - def multiply_stereo_volume(get_frame, t): - return np.multiply( - get_frame(t), - np.array([factors_filter(factor, t) for _ in range(nchannels)]).T, - ) - - def multiply_mono_volume(get_frame, t): - return np.multiply(get_frame(t), factors_filter(factor, t)) - - return multiply_mono_volume if nchannels == 1 else multiply_stereo_volume -
[docs] @audio_video_effect - def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - if self.start_time is None and self.end_time is None: - return clip.transform( - lambda get_frame, t: self.factor * get_frame(t), - keep_duration=True, - ) - - return clip.transform( - self._multiply_volume_in_range( - self.factor, - clip.start if self.start_time is None else self.start_time, - clip.end if self.end_time is None else self.end_time, - clip.nchannels, - ), - keep_duration=True, - )
-
\ No newline at end of file
diff --git a/_modules/moviepy/audio/io/AudioFileClip.html b/_modules/moviepy/audio/io/AudioFileClip.html
deleted file mode 100644
index d307fbdd9..000000000
--- a/_modules/moviepy/audio/io/AudioFileClip.html
+++ /dev/null
@@ -1,527 +0,0 @@
Source code for moviepy.audio.io.AudioFileClip

-"""Implements AudioFileClip, a class for audio clips creation using audio files."""
-
-from moviepy.audio.AudioClip import AudioClip
-from moviepy.audio.io.readers import FFMPEG_AudioReader
-from moviepy.decorators import convert_path_to_string
-
-
-
[docs]class AudioFileClip(AudioClip): - """ - An audio clip read from a sound file, or an array. - The whole file is not loaded in memory. Instead, only a portion is - read and stored in memory. This portion includes frames before - and after the last frames read, so that it is fast to read the sound - backward and forward. - - Parameters - ---------- - - filename - Either a soundfile name (of any extension supported by ffmpeg) - as a string or a path-like object, - or an array representing a sound. If the soundfile is not a .wav, - it will be converted to .wav first, using the ``fps`` and - ``bitrate`` arguments. - - buffersize: - Size to load in memory (in number of frames) - - - Attributes - ---------- - - nbytes - Number of bytes per frame of the original audio file. - - fps - Number of frames per second in the audio file. - - buffersize - See Parameters. - - Lifetime - -------- - - Note that this creates subprocesses and locks files. If you construct one - of these instances, you must call close() afterwards, or the subresources - will not be cleaned up until the process ends. - - Examples - -------- - - >>> snd = AudioFileClip("song.wav") - >>> snd.close() - """ - - @convert_path_to_string("filename") - def __init__( - self, filename, decode_file=False, buffersize=200000, nbytes=2, fps=44100 - ): - AudioClip.__init__(self) - - self.filename = filename - self.reader = FFMPEG_AudioReader( - filename, - decode_file=decode_file, - fps=fps, - nbytes=nbytes, - buffersize=buffersize, - ) - self.fps = fps - self.duration = self.reader.duration - self.end = self.reader.duration - self.buffersize = self.reader.buffersize - self.filename = filename - - self.make_frame = lambda t: self.reader.get_frame(t) - self.nchannels = self.reader.nchannels -
-    def close(self):
-        """Close the internal reader."""
-        if self.reader:
-            self.reader.close()
-            self.reader = None
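Given the lifetime note above, a ``with`` block is the safest way to use short-lived clips; a minimal sketch, assuming ``Clip`` exposes the context-manager protocol as in recent MoviePy versions and that ``song.wav`` exists:

>>> with AudioFileClip("song.wav") as snd:
...     total = snd.duration  # the reader is closed automatically on exit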
diff --git a/_modules/moviepy/audio/io/ffmpeg_audiowriter.html b/_modules/moviepy/audio/io/ffmpeg_audiowriter.html
deleted file mode 100644
index 3021d530e..000000000
--- a/_modules/moviepy/audio/io/ffmpeg_audiowriter.html
+++ /dev/null
@@ -1,658 +0,0 @@
Source code for moviepy.audio.io.ffmpeg_audiowriter
-"""MoviePy audio writing with ffmpeg."""
-
-import subprocess as sp
-
-import proglog
-
-from moviepy.config import FFMPEG_BINARY
-from moviepy.decorators import requires_duration
-from moviepy.tools import cross_platform_popen_params
-
-
-
-class FFMPEG_AudioWriter:
-    """
-    A class to write an AudioClip into an audio file.
-
-    Parameters
-    ----------
-
-    filename
-        Name of any video or audio file, like ``video.mp4`` or ``sound.wav`` etc.
-
-    fps_input
-        Frames per second of the input audio (given by the AudioClip being
-        written down).
-
-    nbytes
-        Number of bytes per sample: 1 for 8-bit, 2 for 16-bit, 4 for 32-bit sound.
-
-    nchannels
-        Number of audio channels in the clip.
-
-    codec
-        Name of the ffmpeg codec to use for the output.
-
-    bitrate:
-        A string indicating the bitrate of the final file. Only
-        relevant for codecs which accept a bitrate.
-
-    """
-
-    def __init__(
-        self,
-        filename,
-        fps_input,
-        nbytes=2,
-        nchannels=2,
-        codec="libfdk_aac",
-        bitrate=None,
-        input_video=None,
-        logfile=None,
-        ffmpeg_params=None,
-    ):
-        if logfile is None:
-            logfile = sp.PIPE
-        self.logfile = logfile
-        self.filename = filename
-        self.codec = codec
-        self.ext = self.filename.split(".")[-1]
-
-        # order is important
-        cmd = [
-            FFMPEG_BINARY,
-            "-y",
-            "-loglevel",
-            "error" if logfile == sp.PIPE else "info",
-            "-f",
-            "s%dle" % (8 * nbytes),
-            "-acodec",
-            "pcm_s%dle" % (8 * nbytes),
-            "-ar",
-            "%d" % fps_input,
-            "-ac",
-            "%d" % nchannels,
-            "-i",
-            "-",
-        ]
-        if input_video is None:
-            cmd.extend(["-vn"])
-        else:
-            cmd.extend(["-i", input_video, "-vcodec", "copy"])
-
-        cmd.extend(["-acodec", codec] + ["-ar", "%d" % fps_input])
-        cmd.extend(["-strict", "-2"])  # needed to support codec 'aac'
-        if bitrate is not None:
-            cmd.extend(["-ab", bitrate])
-        if ffmpeg_params is not None:
-            cmd.extend(ffmpeg_params)
-        cmd.extend([filename])
-
-        popen_params = cross_platform_popen_params(
-            {"stdout": sp.DEVNULL, "stderr": logfile, "stdin": sp.PIPE}
-        )
-
-        self.proc = sp.Popen(cmd, **popen_params)
-
-    def write_frames(self, frames_array):
-        """Send an audio frame (a chunk of ``AudioClip``) to ffmpeg for writing."""
-        try:
-            self.proc.stdin.write(frames_array.tobytes())
-        except IOError as err:
-            _, ffmpeg_error = self.proc.communicate()
-            if ffmpeg_error is not None:
-                ffmpeg_error = ffmpeg_error.decode()
-            else:
-                # The error was redirected to a logfile with `write_logfile=True`,
-                # so read the error from that file instead
-                self.logfile.seek(0)
-                ffmpeg_error = self.logfile.read()
-
-            error = (
-                f"{err}\n\nMoviePy error: FFMPEG encountered the following error while "
-                f"writing file {self.filename}:\n\n {ffmpeg_error}"
-            )
-
-            if "Unknown encoder" in ffmpeg_error:
-                error += (
-                    "\n\nThe audio export failed because FFMPEG didn't find the "
-                    f"specified codec for audio encoding ({self.codec}). "
-                    "Please install this codec or change the codec when calling "
-                    "write_videofile or write_audiofile.\nFor instance for mp3:\n"
-                    "   >>> write_videofile('myvid.mp4', audio_codec='libmp3lame')"
-                )
-
-            elif "incorrect codec parameters ?" in ffmpeg_error:
-                error += (
-                    "\n\nThe audio export failed, possibly because the "
-                    f"codec specified for the video ({self.codec}) is not compatible"
-                    f" with the given extension ({self.ext}). Please specify a "
-                    "valid 'codec' argument in write_audiofile or 'audio_codec' "
-                    "argument in write_videofile. This would be "
-                    "'libmp3lame' for mp3, 'libvorbis' for ogg..."
-                )
-
-            elif "bitrate not specified" in ffmpeg_error:
-                error += (
-                    "\n\nThe audio export failed, possibly because the "
-                    "bitrate you specified was too high or too low for "
-                    "the audio codec."
-                )
-
-            elif "Invalid encoder type" in ffmpeg_error:
-                error += (
-                    "\n\nThe audio export failed because the codec "
-                    "or file extension you provided is not suitable for audio"
-                )
-
-            raise IOError(error)
- -
[docs] def close(self): - """Closes the writer, terminating the subprocess if is still alive.""" - if hasattr(self, "proc") and self.proc: - self.proc.stdin.close() - self.proc.stdin = None - if self.proc.stderr is not None: - self.proc.stderr.close() - self.proc.stderr = None - # If this causes deadlocks, consider terminating instead. - self.proc.wait() - self.proc = None
- - def __del__(self): - # If the garbage collector comes, make sure the subprocess is terminated. - self.close() - - # Support the Context Manager protocol, to ensure that resources are cleaned up. - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close()
- - -
[docs]@requires_duration -def ffmpeg_audiowrite( - clip, - filename, - fps, - nbytes, - buffersize, - codec="libvorbis", - bitrate=None, - write_logfile=False, - ffmpeg_params=None, - logger="bar", -): - """ - A function that wraps the FFMPEG_AudioWriter to write an AudioClip - to a file. - """ - if write_logfile: - logfile = open(filename + ".log", "w+") - else: - logfile = None - logger = proglog.default_bar_logger(logger) - logger(message="MoviePy - Writing audio in %s" % filename) - writer = FFMPEG_AudioWriter( - filename, - fps, - nbytes, - clip.nchannels, - codec=codec, - bitrate=bitrate, - logfile=logfile, - ffmpeg_params=ffmpeg_params, - ) - - for chunk in clip.iter_chunks( - chunksize=buffersize, quantize=True, nbytes=nbytes, fps=fps, logger=logger - ): - writer.write_frames(chunk) - - writer.close() - - if write_logfile: - logfile.close() - logger(message="MoviePy - Done.")
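In practice this writer is driven through ``AudioClip.write_audiofile``, which forwards the codec, bitrate and logfile options shown above; a hedged sketch (file names are illustrative):

>>> from moviepy import AudioFileClip
>>> music = AudioFileClip("music.ogg")
>>> music.write_audiofile("music.mp3", codec="libmp3lame", bitrate="192k")
>>> music.close()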
diff --git a/_modules/moviepy/audio/io/ffplay_audiopreviewer.html b/_modules/moviepy/audio/io/ffplay_audiopreviewer.html
deleted file mode 100644
index 965779732..000000000
--- a/_modules/moviepy/audio/io/ffplay_audiopreviewer.html
+++ /dev/null
@@ -1,598 +0,0 @@
Source code for moviepy.audio.io.ffplay_audiopreviewer
-"""MoviePy audio writing with ffmpeg."""
-
-import subprocess as sp
-
-from moviepy.config import FFPLAY_BINARY
-from moviepy.decorators import requires_duration
-from moviepy.tools import cross_platform_popen_params
-
-
-
-class FFPLAY_AudioPreviewer:
-    """
-    A class to preview an AudioClip.
-
-    Parameters
-    ----------
-
-    fps_input
-        Frames per second of the input audio (given by the AudioClip being
-        previewed).
-
-    nbytes:
-        Number of bytes to encode the sound: 1 for 8bit sound, 2 for
-        16bit, 4 for 32bit sound. Default is 2 bytes, it's fine.
-
-    nchannels:
-        Number of audio channels in the clip. Default to 2 channels.
-
-    """
-
-    def __init__(
-        self,
-        fps_input,
-        nbytes=2,
-        nchannels=2,
-    ):
-        # order is important
-        cmd = [
-            FFPLAY_BINARY,
-            "-autoexit",  # without this, ffplay does not stop at the end of the clip
-            "-nodisp",  # without this, ffplay opens a display window
-            "-f",
-            "s%dle" % (8 * nbytes),
-            "-ar",
-            "%d" % fps_input,
-            "-ac",
-            "%d" % nchannels,
-            "-i",
-            "-",
-        ]
-
-        popen_params = cross_platform_popen_params(
-            {"stdout": sp.DEVNULL, "stderr": sp.STDOUT, "stdin": sp.PIPE}
-        )
-
-        self.proc = sp.Popen(cmd, **popen_params)
-
-    def write_frames(self, frames_array):
-        """Send a raw audio frame (a chunk of audio) to ffplay to be played."""
-        try:
-            self.proc.stdin.write(frames_array.tobytes())
-        except IOError as err:
-            _, ffplay_error = self.proc.communicate()
-            if ffplay_error is not None:
-                ffplay_error = ffplay_error.decode()
-            else:
-                # stderr is piped to stdout here, so there may be no error
-                # output to recover
-                ffplay_error = "ffplay error (no output captured)"
-
-            error = (
-                f"{err}\n\nMoviePy error: FFPLAY encountered the following error while "
-                f"previewing the clip:\n\n {ffplay_error}"
-            )
-
-            raise IOError(error)
- -
[docs] def close(self): - """Closes the writer, terminating the subprocess if is still alive.""" - if hasattr(self, "proc") and self.proc: - self.proc.stdin.close() - self.proc.stdin = None - if self.proc.stderr is not None: - self.proc.stderr.close() - self.proc.stderr = None - # If this causes deadlocks, consider terminating instead. - self.proc.wait() - self.proc = None
- - def __del__(self): - # If the garbage collector comes, make sure the subprocess is terminated. - self.close() - - # Support the Context Manager protocol, to ensure that resources are cleaned up. - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close()
- - -
[docs]@requires_duration -def ffplay_audiopreview( - clip, fps=None, buffersize=2000, nbytes=2, audio_flag=None, video_flag=None -): - """ - A function that wraps the FFPLAY_AudioPreviewer to preview an AudioClip - - Parameters - ---------- - - fps - Frame rate of the sound. 44100 gives top quality, but may cause - problems if your computer is not fast enough and your clip is - complicated. If the sound jumps during the preview, lower it - (11025 is still fine, 5000 is tolerable). - - buffersize - The sound is not generated all at once, but rather made by bunches - of frames (chunks). ``buffersize`` is the size of such a chunk. - Try varying it if you meet audio problems (but you shouldn't - have to). - - nbytes: - Number of bytes to encode the sound: 1 for 8bit sound, 2 for - 16bit, 4 for 32bit sound. 2 bytes is fine. - - audio_flag, video_flag: - Instances of class threading events that are used to synchronize - video and audio during ``VideoClip.preview()``. - """ - if not fps: - if not clip.fps: - fps = 44100 - else: - fps = clip.fps - - with FFPLAY_AudioPreviewer(fps, nbytes, clip.nchannels) as previewer: - first_frame = True - for chunk in clip.iter_chunks( - chunksize=buffersize, quantize=True, nbytes=nbytes, fps=fps - ): - # On first frame, wait for video - if first_frame: - first_frame = False - - if audio_flag is not None: - audio_flag.set() # Say to video that audio is ready - - if video_flag is not None: - video_flag.wait() # Wait for video to be ready - - previewer.write_frames(chunk)
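The preview path can be exercised directly on any audio clip; a minimal sketch (requires a working ``ffplay`` and an audio device, file name illustrative):

>>> from moviepy import AudioFileClip
>>> from moviepy.audio.io.ffplay_audiopreviewer import ffplay_audiopreview
>>> clip = AudioFileClip("music.ogg")
>>> ffplay_audiopreview(clip, fps=22050, buffersize=4000)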
diff --git a/_modules/moviepy/audio/io/readers.html b/_modules/moviepy/audio/io/readers.html
deleted file mode 100644
index d931423e7..000000000
--- a/_modules/moviepy/audio/io/readers.html
+++ /dev/null
@@ -1,737 +0,0 @@
Source code for moviepy.audio.io.readers
-"""MoviePy audio reading with ffmpeg."""
-
-import subprocess as sp
-import warnings
-
-import numpy as np
-
-from moviepy.config import FFMPEG_BINARY
-from moviepy.tools import cross_platform_popen_params
-from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
-
-
-
[docs]class FFMPEG_AudioReader: - """A class to read the audio in either video files or audio files - using ffmpeg. ffmpeg will read any audio and transform them into - raw data. - - Parameters - ---------- - - filename - Name of any video or audio file, like ``video.mp4`` or - ``sound.wav`` etc. - - buffersize - The size of the buffer to use. Should be bigger than the buffer - used by ``write_audiofile`` - - print_infos - Print the ffmpeg infos on the file being read (for debugging) - - fps - Desired frames per second in the decoded signal that will be - received from ffmpeg - - nbytes - Desired number of bytes (1,2,4) in the signal that will be - received from ffmpeg - """ - - def __init__( - self, - filename, - buffersize, - decode_file=False, - print_infos=False, - fps=44100, - nbytes=2, - nchannels=2, - ): - # TODO bring FFMPEG_AudioReader more in line with FFMPEG_VideoReader - # E.g. here self.pos is still 1-indexed. - # (or have them inherit from a shared parent class) - self.filename = filename - self.nbytes = nbytes - self.fps = fps - self.format = "s%dle" % (8 * nbytes) - self.codec = "pcm_s%dle" % (8 * nbytes) - self.nchannels = nchannels - infos = ffmpeg_parse_infos(filename, decode_file=decode_file) - self.duration = infos["duration"] - self.bitrate = infos["audio_bitrate"] - self.infos = infos - self.proc = None - - self.n_frames = int(self.fps * self.duration) - self.buffersize = min(self.n_frames + 1, buffersize) - self.buffer = None - self.buffer_startframe = 1 - self.initialize() - self.buffer_around(1) - -
[docs] def initialize(self, start_time=0): - """Opens the file, creates the pipe.""" - self.close() # if any - - if start_time != 0: - offset = min(1, start_time) - i_arg = [ - "-ss", - "%.05f" % (start_time - offset), - "-i", - self.filename, - "-vn", - "-ss", - "%.05f" % offset, - ] - else: - i_arg = ["-i", self.filename, "-vn"] - - cmd = ( - [FFMPEG_BINARY] - + i_arg - + [ - "-loglevel", - "error", - "-f", - self.format, - "-acodec", - self.codec, - "-ar", - "%d" % self.fps, - "-ac", - "%d" % self.nchannels, - "-", - ] - ) - - popen_params = cross_platform_popen_params( - { - "bufsize": self.buffersize, - "stdout": sp.PIPE, - "stderr": sp.PIPE, - "stdin": sp.DEVNULL, - } - ) - - self.proc = sp.Popen(cmd, **popen_params) - - self.pos = np.round(self.fps * start_time)
- -
[docs] def skip_chunk(self, chunksize): - """Skip a chunk of audio data by reading and discarding the specified number of - frames from the audio stream. The audio stream is read from the `proc` stdout. - After skipping the chunk, the `pos` attribute is updated accordingly. - - Parameters - ---------- - chunksize (int): - The number of audio frames to skip. - """ - _ = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes) - self.proc.stdout.flush() - self.pos = self.pos + chunksize
- -
[docs] def read_chunk(self, chunksize): - """Read a chunk of audio data from the audio stream. - - This method reads a chunk of audio data from the audio stream. The - specified number of frames, given by `chunksize`, is read from the - `proc` stdout. The audio data is returned as a NumPy array, where - each row corresponds to a frame and each column corresponds to a - channel. If there is not enough audio left to read, the remaining - portion is padded with zeros, ensuring that the returned array has - the desired length. The `pos` attribute is updated accordingly. - - Parameters - ---------- - chunksize (float): - The desired number of audio frames to read. - - """ - # chunksize is not being autoconverted from float to int - chunksize = int(round(chunksize)) - s = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes) - data_type = {1: "int8", 2: "int16", 4: "int32"}[self.nbytes] - if hasattr(np, "frombuffer"): - result = np.frombuffer(s, dtype=data_type) - else: - result = np.fromstring(s, dtype=data_type) - result = (1.0 * result / 2 ** (8 * self.nbytes - 1)).reshape( - (int(len(result) / self.nchannels), self.nchannels) - ) - - # Pad the read chunk with zeros when there isn't enough audio - # left to read, so the buffer is always at full length. - pad = np.zeros((chunksize - len(result), self.nchannels), dtype=result.dtype) - result = np.concatenate([result, pad]) - # self.proc.stdout.flush() - self.pos = self.pos + chunksize - return result
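The amount of data pulled from the pipe follows directly from the stream parameters: ``chunksize * nchannels * nbytes`` bytes per chunk. A quick check of that arithmetic:

>>> fps, nchannels, nbytes = 44100, 2, 2  # 16-bit stereo at 44.1 kHz
>>> chunksize = fps                       # one second of audio
>>> chunksize * nchannels * nbytes        # bytes read from ffmpeg's stdout
176400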
- -
-    def seek(self, pos):
-        """Read a frame at time t. Note for coders: getting an arbitrary
-        frame in the video with ffmpeg can be painfully slow if some
-        decoding has to be done. This function tries to avoid fetching
-        arbitrary frames whenever possible, by moving between adjacent
-        frames.
-        """
-        if (pos < self.pos) or (pos > (self.pos + 1000000)):
-            t = 1.0 * pos / self.fps
-            self.initialize(t)
-        elif pos > self.pos:
-            self.skip_chunk(pos - self.pos)
-        # last case standing: pos = current pos
-        self.pos = pos
- -
[docs] def get_frame(self, tt): - """Retrieve the audio frame(s) corresponding to the given timestamp(s). - - Parameters - ---------- - tt (float or numpy.ndarray): - The timestamp(s) at which to retrieve the audio frame(s). - If `tt` is a single float value, the frame corresponding to that - timestamp is returned. If `tt` is a NumPy array of timestamps, an - array of frames corresponding to each timestamp is returned. - """ - if isinstance(tt, np.ndarray): - # lazy implementation, but should not cause problems in - # 99.99 % of the cases - - # elements of t that are actually in the range of the - # audio file. - in_time = (tt >= 0) & (tt < self.duration) - - # Check that the requested time is in the valid range - if not in_time.any(): - raise IOError( - "Error in file %s, " % (self.filename) - + "Accessing time t=%.02f-%.02f seconds, " % (tt[0], tt[-1]) - + "with clip duration=%f seconds, " % self.duration - ) - - # The np.round in the next line is super-important. - # Removing it results in artifacts in the noise. - frames = np.round((self.fps * tt)).astype(int)[in_time] - fr_min, fr_max = frames.min(), frames.max() - - if not (0 <= (fr_min - self.buffer_startframe) < len(self.buffer)): - self.buffer_around(fr_min) - elif not (0 <= (fr_max - self.buffer_startframe) < len(self.buffer)): - self.buffer_around(fr_max) - - try: - result = np.zeros((len(tt), self.nchannels)) - indices = frames - self.buffer_startframe - result[in_time] = self.buffer[indices] - return result - - except IndexError as error: - warnings.warn( - "Error in file %s, " % (self.filename) - + "At time t=%.02f-%.02f seconds, " % (tt[0], tt[-1]) - + "indices wanted: %d-%d, " % (indices.min(), indices.max()) - + "but len(buffer)=%d\n" % (len(self.buffer)) - + str(error), - UserWarning, - ) - - # repeat the last frame instead - indices[indices >= len(self.buffer)] = len(self.buffer) - 1 - result[in_time] = self.buffer[indices] - return result - - else: - ind = int(self.fps * tt) - if ind < 0 or ind > self.n_frames: # out of time: return 0 - return np.zeros(self.nchannels) - - if not (0 <= (ind - self.buffer_startframe) < len(self.buffer)): - # out of the buffer: recenter the buffer - self.buffer_around(ind) - - # read the frame in the buffer - return self.buffer[ind - self.buffer_startframe]
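Both call styles of ``get_frame`` can be exercised directly on a reader; a minimal sketch (``music.ogg`` is illustrative):

>>> import numpy as np
>>> from moviepy.audio.io.readers import FFMPEG_AudioReader
>>> reader = FFMPEG_AudioReader("music.ogg", buffersize=200000)
>>> frame = reader.get_frame(1.5)           # single t -> shape (nchannels,)
>>> tt = np.arange(0, 1, 1.0 / reader.fps)  # one timestamp per sample
>>> frames = reader.get_frame(tt)           # vector t -> shape (len(tt), nchannels)
>>> reader.close()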
- -
[docs] def buffer_around(self, frame_number): - """Fill the buffer with frames, centered on frame_number if possible.""" - # start-frame for the buffer - new_bufferstart = max(0, frame_number - self.buffersize // 2) - - if self.buffer is not None: - current_f_end = self.buffer_startframe + self.buffersize - if new_bufferstart < current_f_end < new_bufferstart + self.buffersize: - # We already have part of what must be read - conserved = current_f_end - new_bufferstart - chunksize = self.buffersize - conserved - array = self.read_chunk(chunksize) - self.buffer = np.vstack([self.buffer[-conserved:], array]) - else: - self.seek(new_bufferstart) - self.buffer = self.read_chunk(self.buffersize) - else: - self.seek(new_bufferstart) - self.buffer = self.read_chunk(self.buffersize) - - self.buffer_startframe = new_bufferstart
- -
[docs] def close(self): - """Closes the reader, terminating the subprocess if is still alive.""" - if self.proc: - if self.proc.poll() is None: - self.proc.terminate() - self.proc.stdout.close() - self.proc.stderr.close() - self.proc.wait() - self.proc = None
- - def __del__(self): - # If the garbage collector comes, make sure the subprocess is terminated. - self.close()
diff --git a/_modules/moviepy/audio/tools/cuts.html b/_modules/moviepy/audio/tools/cuts.html
deleted file mode 100644
index 145d7eb02..000000000
--- a/_modules/moviepy/audio/tools/cuts.html
+++ /dev/null
@@ -1,473 +0,0 @@
Source code for moviepy.audio.tools.cuts
-"""Cutting utilities working with audio."""
-
-import numpy as np
-
-
-
-def find_audio_period(clip, min_time=0.1, max_time=2, time_resolution=0.01):
-    """Finds the period, in seconds, of an audio clip.
-
-    Parameters
-    ----------
-
-    min_time : float, optional
-        Minimum bound for the returned value.
-
-    max_time : float, optional
-        Maximum bound for the returned value.
-
-    time_resolution : float, optional
-        Numerical precision.
-    """
-    chunksize = int(time_resolution * clip.fps)
-    chunk_duration = 1.0 * chunksize / clip.fps
-    # v denotes the list of volumes
-    v = np.array([(chunk**2).sum() for chunk in clip.iter_chunks(chunksize)])
-    v = v - v.mean()
-    corrs = np.correlate(v, v, mode="full")[-len(v) :]
-    corrs[: int(min_time / chunk_duration)] = 0
-    corrs[int(max_time / chunk_duration) :] = 0
-    return chunk_duration * np.argmax(corrs)
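A typical use is estimating the loop length of a seamlessly looping soundtrack; a hedged sketch (``loop.ogg`` is illustrative):

>>> from moviepy import AudioFileClip
>>> from moviepy.audio.tools.cuts import find_audio_period
>>> music = AudioFileClip("loop.ogg")
>>> period = find_audio_period(music, min_time=0.5, max_time=4)
>>> one_cycle = music.with_subclip(0, period)  # one clean cycle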
diff --git a/_modules/moviepy/config.html b/_modules/moviepy/config.html
deleted file mode 100644
index ea1b4f6c8..000000000
--- a/_modules/moviepy/config.html
+++ /dev/null
@@ -1,535 +0,0 @@
Source code for moviepy.config
-"""Third party programs configuration for MoviePy."""
-
-import os
-import subprocess as sp
-from pathlib import Path
-
-from moviepy.tools import cross_platform_popen_params
-
-
-try:
-    from dotenv import find_dotenv, load_dotenv
-
-    DOTENV = find_dotenv()
-    load_dotenv(DOTENV)
-except ImportError:
-    DOTENV = None
-
-FFMPEG_BINARY = os.getenv("FFMPEG_BINARY", "ffmpeg-imageio")
-FFPLAY_BINARY = os.getenv("FFPLAY_BINARY", "auto-detect")
-
-IS_POSIX_OS = os.name == "posix"
-
-
-
-def try_cmd(cmd):
-    """Verify if the OS supports command invocation as expected by moviepy."""
-    try:
-        popen_params = cross_platform_popen_params(
-            {"stdout": sp.PIPE, "stderr": sp.PIPE, "stdin": sp.DEVNULL}
-        )
-        proc = sp.Popen(cmd, **popen_params)
-        proc.communicate()
-    except Exception as err:
-        return False, err
-    else:
-        return True, None
-
-
-if FFMPEG_BINARY == "ffmpeg-imageio":
-    from imageio.plugins.ffmpeg import get_exe
-
-    FFMPEG_BINARY = get_exe()
-
-elif FFMPEG_BINARY == "auto-detect":
-    if try_cmd(["ffmpeg"])[0]:
-        FFMPEG_BINARY = "ffmpeg"
-    elif not IS_POSIX_OS and try_cmd(["ffmpeg.exe"])[0]:
-        FFMPEG_BINARY = "ffmpeg.exe"
-    else:  # pragma: no cover
-        FFMPEG_BINARY = "unset"
-else:
-    success, err = try_cmd([FFMPEG_BINARY])
-    if not success:
-        raise IOError(
-            f"{err} - The path specified for the ffmpeg binary might be wrong"
-        )
-
-
-if FFPLAY_BINARY == "auto-detect":
-    if try_cmd(["ffplay"])[0]:
-        FFPLAY_BINARY = "ffplay"
-    elif not IS_POSIX_OS and try_cmd(["ffplay.exe"])[0]:
-        FFPLAY_BINARY = "ffplay.exe"
-    else:  # pragma: no cover
-        FFPLAY_BINARY = "unset"
-else:
-    success, err = try_cmd([FFPLAY_BINARY])
-    if not success:
-        raise IOError(
-            f"{err} - The path specified for the ffplay binary might be wrong"
-        )
-
-def check():
-    """Check if moviepy has found the binaries for FFmpeg and FFplay."""
-    if try_cmd([FFMPEG_BINARY])[0]:
-        print(f"MoviePy: ffmpeg successfully found in '{FFMPEG_BINARY}'.")
-    else:  # pragma: no cover
-        print(f"MoviePy: can't find or access ffmpeg in '{FFMPEG_BINARY}'.")
-
-    if try_cmd([FFPLAY_BINARY])[0]:
-        print(f"MoviePy: ffplay successfully found in '{FFPLAY_BINARY}'.")
-    else:  # pragma: no cover
-        print(f"MoviePy: can't find or access ffplay in '{FFPLAY_BINARY}'.")
-
-    if DOTENV:
-        print(f"\n.env file content at {DOTENV}:\n")
-        print(Path(DOTENV).read_text())
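Since the binaries are resolved at import time, overriding them must happen through the environment (or a ``.env`` file) before ``moviepy`` is imported; a minimal sketch with an illustrative path:

>>> import os
>>> os.environ["FFMPEG_BINARY"] = "/usr/local/bin/ffmpeg"  # before importing moviepy
>>> from moviepy.config import check
>>> check()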
- - -if __name__ == "__main__": # pragma: no cover - check() -
diff --git a/_modules/moviepy/decorators.html b/_modules/moviepy/decorators.html
deleted file mode 100644
index bca94d10e..000000000
--- a/_modules/moviepy/decorators.html
+++ /dev/null
@@ -1,587 +0,0 @@
Source code for moviepy.decorators
-"""Decorators used by moviepy."""
-
-import inspect
-import os
-
-import decorator
-
-from moviepy.tools import convert_to_seconds
-
-
-@decorator.decorator
-def outplace(func, clip, *args, **kwargs):
-    """Applies ``func(clip.copy(), *args, **kwargs)`` and returns ``clip.copy()``."""
-    new_clip = clip.copy()
-    func(new_clip, *args, **kwargs)
-    return new_clip
-
-
-@decorator.decorator
-def convert_masks_to_RGB(func, clip, *args, **kwargs):
-    """If the clip is a mask, convert it to RGB before running the function."""
-    if clip.is_mask:
-        clip = clip.to_RGB()
-    return func(clip, *args, **kwargs)
-
-
-@decorator.decorator
-def apply_to_mask(func, clip, *args, **kwargs):
-    """Applies the same function ``func`` to the mask of the clip created with
-    ``func``.
-    """
-    new_clip = func(clip, *args, **kwargs)
-    if getattr(new_clip, "mask", None):
-        new_clip.mask = func(new_clip.mask, *args, **kwargs)
-    return new_clip
-
-
-@decorator.decorator
-def apply_to_audio(func, clip, *args, **kwargs):
-    """Applies the function ``func`` to the audio of the clip created with ``func``."""
-    new_clip = func(clip, *args, **kwargs)
-    if getattr(new_clip, "audio", None):
-        new_clip.audio = func(new_clip.audio, *args, **kwargs)
-    return new_clip
-
-
-@decorator.decorator
-def requires_duration(func, clip, *args, **kwargs):
-    """Raises an error if the clip has no duration."""
-    if clip.duration is None:
-        raise ValueError("Attribute 'duration' not set")
-    else:
-        return func(clip, *args, **kwargs)
-
-
-@decorator.decorator
-def requires_fps(func, clip, *args, **kwargs):
-    """Raises an error if the clip has no fps."""
-    if not hasattr(clip, "fps") or clip.fps is None:
-        raise ValueError("Attribute 'fps' not set")
-    else:
-        return func(clip, *args, **kwargs)
-
-
-@decorator.decorator
-def audio_video_effect(func, effect, clip, *args, **kwargs):
-    """Use an audio function on a video/audio clip.
-
-    This decorator indicates that the function func (audioclip -> audioclip)
-    can also be used on a video clip, in which case it returns a
-    videoclip with unmodified video and modified audio.
-    """
-    if hasattr(clip, "audio"):
-        if clip.audio is not None:
-            clip.audio = func(effect, clip.audio, *args, **kwargs)
-        return clip
-    else:
-        return func(effect, clip, *args, **kwargs)
-
-
-
[docs]def preprocess_args(fun, varnames): - """Applies fun to variables in varnames before launching the function.""" - - def wrapper(func, *args, **kwargs): - names = inspect.getfullargspec(func).args - new_args = [ - fun(arg) if (name in varnames) and (arg is not None) else arg - for (arg, name) in zip(args, names) - ] - new_kwargs = { - kwarg: fun(value) if kwarg in varnames else value - for (kwarg, value) in kwargs.items() - } - return func(*new_args, **new_kwargs) - - return decorator.decorator(wrapper)
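``preprocess_args`` is the factory behind the two converters below; it can build other argument-normalizing decorators the same way. A sketch with a hypothetical ``strip_filenames`` decorator built from ``str.strip``:

>>> strip_filenames = preprocess_args(str.strip, ["filename"])
>>>
>>> @strip_filenames
... def open_media(filename, mode="r"):
...     return filename
>>>
>>> open_media("  movie.mp4  ")  # 'filename' is stripped before the call
'movie.mp4'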
- - -
[docs]def convert_parameter_to_seconds(varnames): - """Converts the specified variables to seconds.""" - return preprocess_args(convert_to_seconds, varnames)
- - -
[docs]def convert_path_to_string(varnames): - """Converts the specified variables to a path string.""" - return preprocess_args(os.fspath, varnames)
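Both converters are used throughout the clip classes; their effect is easy to see on a standalone function. A hedged sketch (output shown for a POSIX system):

>>> from pathlib import Path
>>>
>>> @convert_parameter_to_seconds(["t"])
... @convert_path_to_string(["filename"])
... def report(filename, t):
...     return filename, t
>>>
>>> report(Path("media") / "chaplin.mp4", "01:30")
('media/chaplin.mp4', 90.0)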
- - -@decorator.decorator -def add_mask_if_none(func, clip, *args, **kwargs): - """Add a mask to the clip if there is none.""" - if clip.mask is None: - clip = clip.with_add_mask() - return func(clip, *args, **kwargs) - - -@decorator.decorator -def use_clip_fps_by_default(func, clip, *args, **kwargs): - """Will use ``clip.fps`` if no ``fps=...`` is provided in **kwargs**.""" - - def find_fps(fps): - if fps is not None: - return fps - elif getattr(clip, "fps", None): - return clip.fps - raise AttributeError( - "No 'fps' (frames per second) attribute specified" - " for function %s and the clip has no 'fps' attribute. Either" - " provide e.g. fps=24 in the arguments of the function, or define" - " the clip's fps with `clip.fps=24`" % func.__name__ - ) - - names = inspect.getfullargspec(func).args[1:] - - new_args = [ - find_fps(arg) if (name == "fps") else arg for (arg, name) in zip(args, names) - ] - new_kwargs = { - kwarg: find_fps(value) if kwarg == "fps" else value - for (kwarg, value) in kwargs.items() - } - - return func(clip, *new_args, **new_kwargs) -
diff --git a/_modules/moviepy/tools.html b/_modules/moviepy/tools.html
deleted file mode 100644
index ef24df8f4..000000000
--- a/_modules/moviepy/tools.html
+++ /dev/null
@@ -1,676 +0,0 @@
Source code for moviepy.tools
-"""Misc. useful functions that can be used at many places in the program."""
-
-import os
-import platform
-import subprocess as sp
-import warnings
-
-import proglog
-
-
-OS_NAME = os.name
-
-
-
-def cross_platform_popen_params(popen_params):
-    """Wrap a dictionary of ``subprocess.Popen`` kwargs with this function so
-    that it works without unexpected behaviour on any platform.
-    Currently, the implementation will add to them:
-
-    - ``creationflags=0x08000000``: no extra unwanted window opens on Windows
-      when the child process is created. Only added on Windows.
-    """
-    if OS_NAME == "nt":
-        popen_params["creationflags"] = 0x08000000
-    return popen_params
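The helper is meant to wrap every ``Popen`` parameter dictionary in the code base, as the writers and readers above do; a minimal sketch (assumes ffmpeg is on the PATH):

>>> import subprocess as sp
>>> popen_params = cross_platform_popen_params(
...     {"stdout": sp.PIPE, "stderr": sp.PIPE, "stdin": sp.DEVNULL}
... )
>>> proc = sp.Popen(["ffmpeg", "-version"], **popen_params)
>>> out, _ = proc.communicate()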
- - -
-def subprocess_call(cmd, logger="bar"):
-    """Executes the given subprocess command.
-
-    Set logger to None or a custom Proglog logger to avoid printing.
-    """
-    logger = proglog.default_bar_logger(logger)
-    logger(message="MoviePy - Running:\n>>> " + " ".join(cmd))
-
-    popen_params = cross_platform_popen_params(
-        {"stdout": sp.DEVNULL, "stderr": sp.PIPE, "stdin": sp.DEVNULL}
-    )
-
-    proc = sp.Popen(cmd, **popen_params)
-
-    out, err = proc.communicate()  # proc.wait()
-    proc.stderr.close()
-
-    if proc.returncode:
-        logger(message="MoviePy - Command returned an error")
-        raise IOError(err.decode("utf8"))
-    else:
-        logger(message="MoviePy - Command successful")
-
-    del proc
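``subprocess_call`` is the convenience layer for one-shot ffmpeg invocations; a hedged sketch (file names illustrative, requires a working ffmpeg):

>>> from moviepy.config import FFMPEG_BINARY
>>> subprocess_call([FFMPEG_BINARY, "-y", "-i", "in.mp4", "out.ogv"], logger=None)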
- - -
-def convert_to_seconds(time):
-    """Will convert any time into seconds.
-
-    If the type of `time` is not valid,
-    it's returned as is.
-
-    Here are the accepted formats:
-
-    >>> convert_to_seconds(15.4)  # seconds
-    15.4
-    >>> convert_to_seconds((1, 21.5))  # (min,sec)
-    81.5
-    >>> convert_to_seconds((1, 1, 2))  # (hr, min, sec)
-    3662
-    >>> convert_to_seconds('01:01:33.045')
-    3693.045
-    >>> convert_to_seconds('01:01:33,5')  # comma works too
-    3693.5
-    >>> convert_to_seconds('1:33,5')  # only minutes and secs
-    99.5
-    >>> convert_to_seconds('33.5')  # only secs
-    33.5
-    """
-    factors = (1, 60, 3600)
-
-    if isinstance(time, str):
-        time = [float(part.replace(",", ".")) for part in time.split(":")]
-
-    if not isinstance(time, (tuple, list)):
-        return time
-
-    return sum(mult * part for mult, part in zip(factors, reversed(time)))
- - -
-def deprecated_version_of(func, old_name):
-    """Indicates that a function is deprecated and has a new name.
-
-    `func` is the new function and `old_name` is the name of the deprecated
-    function.
-
-    Returns
-    -------
-
-    deprecated_func
-        A function that does the same thing as `func`, but with a docstring
-        and a printed message on call which say that the function is
-        deprecated and that you should use `func` instead.
-
-    Examples
-    --------
-
-    >>> # The badly named method 'to_file' is replaced by 'write_file'
-    >>> class Clip:
-    ...     def write_file(self, *args, **kwargs):
-    ...         pass  # blablabla
-    >>>
-    >>> Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file')
-    """
-    # Detect new name of func
-    new_name = func.__name__
-
-    warning = (
-        "The function ``%s`` is deprecated and is kept temporarily "
-        "for backwards compatibility.\nPlease use the new name, "
-        "``%s``, instead."
-    ) % (old_name, new_name)
-
-    def deprecated_func(*args, **kwargs):
-        warnings.warn("MoviePy: " + warning, PendingDeprecationWarning)
-        return func(*args, **kwargs)
-
-    deprecated_func.__doc__ = warning
-
-    return deprecated_func
- - -# Non-exhaustive dictionary to store default information. -# Any addition is most welcome. -# Note that 'gif' is complicated to place. From a VideoFileClip point of view, -# it is a video, but from a HTML5 point of view, it is an image. - -extensions_dict = { - "mp4": {"type": "video", "codec": ["libx264", "libmpeg4", "aac"]}, - "mkv": {"type": "video", "codec": ["libx264", "libmpeg4", "aac"]}, - "ogv": {"type": "video", "codec": ["libtheora"]}, - "webm": {"type": "video", "codec": ["libvpx"]}, - "avi": {"type": "video"}, - "mov": {"type": "video"}, - "ogg": {"type": "audio", "codec": ["libvorbis"]}, - "mp3": {"type": "audio", "codec": ["libmp3lame"]}, - "wav": {"type": "audio", "codec": ["pcm_s16le", "pcm_s24le", "pcm_s32le"]}, - "m4a": {"type": "audio", "codec": ["libfdk_aac"]}, -} - -for ext in ["jpg", "jpeg", "png", "bmp", "tiff"]: - extensions_dict[ext] = {"type": "image"} - - -
[docs]def find_extension(codec): - """Returns the correspondent file extension for a codec. - - Parameters - ---------- - - codec : str - Video or audio codec name. - """ - if codec in extensions_dict: - # codec is already the extension - return codec - - for ext, infos in extensions_dict.items(): - if codec in infos.get("codec", []): - return ext - raise ValueError( - "The audio_codec you chose is unknown by MoviePy. " - "You should report this. In the meantime, you can " - "specify a temp_audiofile with the right extension " - "in write_videofile." - )
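Reading the table above, the lookup goes codec -> extension; for example:

>>> find_extension("libmp3lame")
'mp3'
>>> find_extension("libvorbis")
'ogg'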
- - -
-def close_all_clips(objects="globals", types=("audio", "video", "image")):
-    """Closes all clips in a context.
-
-    Follows different strategies for retrieving the namespace from which the
-    clips to close will be taken, depending on the ``objects`` argument, and
-    filters by type of clip depending on the ``types`` argument.
-
-    Parameters
-    ----------
-
-    objects : str or dict, optional
-        - If it is a string and the value is ``"globals"``, closes all the
-          clips contained in the ``globals()`` namespace.
-        - If it is a dictionary, the values of the dictionary could be clips
-          to close, useful if you want to use ``locals()``.
-
-    types : Iterable, optional
-        Set of types of clips to close, "audio", "video" or "image" being
-        the supported values.
-    """
-    from moviepy.audio.io.AudioFileClip import AudioFileClip
-    from moviepy.video.io.VideoFileClip import VideoFileClip
-    from moviepy.video.VideoClip import ImageClip
-
-    CLIP_TYPES = {
-        "audio": AudioFileClip,
-        "video": VideoFileClip,
-        "image": ImageClip,
-    }
-
-    if objects == "globals":  # pragma: no cover
-        objects = globals()
-    if hasattr(objects, "values"):
-        objects = objects.values()
-    types_tuple = tuple(CLIP_TYPES[key] for key in types)
-    for obj in objects:
-        if isinstance(obj, types_tuple):
-            obj.close()
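A common pattern in scripts and tests is to close everything defined locally once processing is done; a minimal sketch (file names illustrative):

>>> from moviepy import VideoFileClip
>>> def export(path):
...     clip = VideoFileClip(path)
...     clip.write_videofile("out.mp4")
...     close_all_clips(locals())  # closes `clip` and any other local clips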
- - -
-def no_display_available() -> bool:
-    """Return True if we determine the host system has no graphical environment.
-
-    This is useful to skip tests requiring a display, like preview.
-
-    .. note::
-        Currently this only works for Linux/BSD systems with X11 or Wayland.
-        It probably works for SunOS, AIX and CYGWIN.
-    """
-    system = platform.system()
-    if system in ["Linux", "FreeBSD", "NetBSD", "OpenBSD", "SunOS", "AIX"]:
-        if ("DISPLAY" not in os.environ) and ("WAYLAND_DISPLAY" not in os.environ):
-            return True
-
-    if "CYGWIN_NT" in system:
-        if ("DISPLAY" not in os.environ) and ("WAYLAND_DISPLAY" not in os.environ):
-            return True
-
-    return False
diff --git a/_modules/moviepy/video/VideoClip.html b/_modules/moviepy/video/VideoClip.html
deleted file mode 100644
index 3448a7ce9..000000000
--- a/_modules/moviepy/video/VideoClip.html
+++ /dev/null
@@ -1,2344 +0,0 @@
Source code for moviepy.video.VideoClip
-"""Implements VideoClip (base class for video clips) and its main subclasses:
-
-- Animated clips:     VideoFileClip, ImageSequenceClip, BitmapClip
-- Static image clips: ImageClip, ColorClip, TextClip
-"""
-
-import copy as _copy
-import os
-import threading
-from numbers import Real
-from typing import TYPE_CHECKING, List
-
-import numpy as np
-import proglog
-from imageio.v2 import imread as imread_v2
-from imageio.v3 import imwrite
-from PIL import Image, ImageDraw, ImageFont
-
-from moviepy.video.io.ffplay_previewer import ffplay_preview_video
-
-
-if TYPE_CHECKING:
-    from moviepy.Effect import Effect
-
-from moviepy.Clip import Clip
-from moviepy.decorators import (
-    add_mask_if_none,
-    apply_to_mask,
-    convert_masks_to_RGB,
-    convert_parameter_to_seconds,
-    convert_path_to_string,
-    outplace,
-    requires_duration,
-    requires_fps,
-    use_clip_fps_by_default,
-)
-from moviepy.tools import extensions_dict, find_extension
-from moviepy.video.fx.Crop import Crop
-from moviepy.video.fx.Resize import Resize
-from moviepy.video.fx.Rotate import Rotate
-from moviepy.video.io.ffmpeg_writer import ffmpeg_write_video
-from moviepy.video.io.gif_writers import write_gif_with_imageio
-from moviepy.video.tools.drawing import blit
-
-
-
[docs]class VideoClip(Clip): - """Base class for video clips. - - See ``VideoFileClip``, ``ImageClip`` etc. for more user-friendly classes. - - - Parameters - ---------- - - is_mask - `True` if the clip is going to be used as a mask. - - duration - Duration of the clip in seconds. If None we got a clip of infinite - duration - - has_constant_size - Define if clip size is constant or if it may vary with time. Default - to True - - - - Attributes - ---------- - - size - The size of the clip, (width,height), in pixels. - - w, h - The width and height of the clip, in pixels. - - is_mask - Boolean set to `True` if the clip is a mask. - - make_frame - A function ``t-> frame at time t`` where ``frame`` is a - w*h*3 RGB array. - - mask (default None) - VideoClip mask attached to this clip. If mask is ``None``, - The video clip is fully opaque. - - audio (default None) - An AudioClip instance containing the audio of the video clip. - - pos - A function ``t->(x,y)`` where ``x,y`` is the position - of the clip when it is composed with other clips. - See ``VideoClip.set_pos`` for more details - - relative_pos - See variable ``pos``. - - layer - Indicates which clip is rendered on top when two clips overlap in - a CompositeVideoClip. The highest number is rendered on top. - Default is 0. - - """ - - def __init__( - self, make_frame=None, is_mask=False, duration=None, has_constant_size=True - ): - super().__init__() - self.mask = None - self.audio = None - self.pos = lambda t: (0, 0) - self.relative_pos = False - self.layer = 0 - if make_frame: - self.make_frame = make_frame - self.size = self.get_frame(0).shape[:2][::-1] - self.is_mask = is_mask - self.has_constant_size = has_constant_size - if duration is not None: - self.duration = duration - self.end = duration - - @property - def w(self): - """Returns the width of the video.""" - return self.size[0] - - @property - def h(self): - """Returns the height of the video.""" - return self.size[1] - - @property - def aspect_ratio(self): - """Returns the aspect ratio of the video.""" - return self.w / float(self.h) - - @property - @requires_duration - @requires_fps - def n_frames(self): - """Returns the number of frames of the video.""" - return int(self.duration * self.fps) - - def __copy__(self): - """Mixed copy of the clip. - - Returns a shallow copy of the clip whose mask and audio will - be shallow copies of the clip's mask and audio if they exist. - - This method is intensively used to produce new clips every time - there is an outplace transformation of the clip (clip.resize, - clip.with_subclip, etc.) - - Acts like a deepcopy except for the fact that readers and other - possible unpickleables objects are not copied. - """ - cls = self.__class__ - new_clip = cls.__new__(cls) - for attr in self.__dict__: - value = getattr(self, attr) - if attr in ("mask", "audio"): - value = _copy.copy(value) - setattr(new_clip, attr, value) - return new_clip - - copy = __copy__ - - # =============================================================== - # EXPORT OPERATIONS - -
[docs] @convert_parameter_to_seconds(["t"]) - @convert_masks_to_RGB - def save_frame(self, filename, t=0, with_mask=True): - """Save a clip's frame to an image file. - - Saves the frame of clip corresponding to time ``t`` in ``filename``. - ``t`` can be expressed in seconds (15.35), in (min, sec), - in (hour, min, sec), or as a string: '01:03:05.35'. - - Parameters - ---------- - - filename : str - Name of the file in which the frame will be stored. - - t : float or tuple or str, optional - Moment of the frame to be saved. As default, the first frame will be - saved. - - with_mask : bool, optional - If is ``True`` the mask is saved in the alpha layer of the picture - (only works with PNGs). - """ - im = self.get_frame(t) - if with_mask and self.mask is not None: - mask = 255 * self.mask.get_frame(t) - im = np.dstack([im, mask]).astype("uint8") - else: - im = im.astype("uint8") - - imwrite(filename, im)
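For instance, grabbing a poster frame at the 5-second mark with transparency preserved; a short sketch (file names illustrative):

>>> from moviepy import VideoFileClip
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.save_frame("poster.png", t=5, with_mask=True)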
- -
[docs] @requires_duration - @use_clip_fps_by_default - @convert_masks_to_RGB - @convert_path_to_string(["filename", "temp_audiofile", "temp_audiofile_path"]) - def write_videofile( - self, - filename, - fps=None, - codec=None, - bitrate=None, - audio=True, - audio_fps=44100, - preset="medium", - audio_nbytes=4, - audio_codec=None, - audio_bitrate=None, - audio_bufsize=2000, - temp_audiofile=None, - temp_audiofile_path="", - remove_temp=True, - write_logfile=False, - threads=None, - ffmpeg_params=None, - logger="bar", - pixel_format=None, - ): - """Write the clip to a videofile. - - Parameters - ---------- - - filename - Name of the video file to write in, as a string or a path-like object. - The extension must correspond to the "codec" used (see below), - or simply be '.avi' (which will work with any codec). - - fps - Number of frames per second in the resulting video file. If None is - provided, and the clip has an fps attribute, this fps will be used. - - codec - Codec to use for image encoding. Can be any codec supported - by ffmpeg. If the filename is has extension '.mp4', '.ogv', '.webm', - the codec will be set accordingly, but you can still set it if you - don't like the default. For other extensions, the output filename - must be set accordingly. - - Some examples of codecs are: - - - ``'libx264'`` (default codec for file extension ``.mp4``) - makes well-compressed videos (quality tunable using 'bitrate'). - - ``'mpeg4'`` (other codec for extension ``.mp4``) can be an alternative - to ``'libx264'``, and produces higher quality videos by default. - - ``'rawvideo'`` (use file extension ``.avi``) will produce - a video of perfect quality, of possibly very huge size. - - ``png`` (use file extension ``.avi``) will produce a video - of perfect quality, of smaller size than with ``rawvideo``. - - ``'libvorbis'`` (use file extension ``.ogv``) is a nice video - format, which is completely free/ open source. However not - everyone has the codecs installed by default on their machine. - - ``'libvpx'`` (use file extension ``.webm``) is tiny a video - format well indicated for web videos (with HTML5). Open source. - - audio - Either ``True``, ``False``, or a file name. - If ``True`` and the clip has an audio clip attached, this - audio clip will be incorporated as a soundtrack in the movie. - If ``audio`` is the name of an audio file, this audio file - will be incorporated as a soundtrack in the movie. - - audio_fps - frame rate to use when generating the sound. - - temp_audiofile - the name of the temporary audiofile, as a string or path-like object, - to be created and then used to write the complete video, if any. - - temp_audiofile_path - the location that the temporary audiofile is placed, as a - string or path-like object. Defaults to the current working directory. - - audio_codec - Which audio codec should be used. Examples are 'libmp3lame' - for '.mp3', 'libvorbis' for 'ogg', 'libfdk_aac':'m4a', - 'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav. - Default is 'libmp3lame', unless the video extension is 'ogv' - or 'webm', at which case the default is 'libvorbis'. - - audio_bitrate - Audio bitrate, given as a string like '50k', '500k', '3000k'. - Will determine the size/quality of audio in the output file. - Note that it mainly an indicative goal, the bitrate won't - necessarily be the this in the final file. - - preset - Sets the time that FFMPEG will spend optimizing the compression. 
- Choices are: ultrafast, superfast, veryfast, faster, fast, medium, - slow, slower, veryslow, placebo. Note that this does not impact - the quality of the video, only the size of the video file. So - choose ultrafast when you are in a hurry and file size does not - matter. - - threads - Number of threads to use for ffmpeg. Can speed up the writing of - the video on multicore computers. - - ffmpeg_params - Any additional ffmpeg parameters you would like to pass, as a list - of terms, like ['-option1', 'value1', '-option2', 'value2']. - - write_logfile - If true, will write log files for the audio and the video. - These will be files ending with '.log' with the name of the - output file in them. - - logger - Either ``"bar"`` for progress bar or ``None`` or any Proglog logger. - - pixel_format - Pixel format for the output video file. - - Examples - -------- - - >>> from moviepy import VideoFileClip - >>> clip = VideoFileClip("myvideo.mp4").with_subclip(100,120) - >>> clip.write_videofile("my_new_video.mp4") - >>> clip.close() - - """ - name, ext = os.path.splitext(os.path.basename(filename)) - ext = ext[1:].lower() - logger = proglog.default_bar_logger(logger) - - if codec is None: - try: - codec = extensions_dict[ext]["codec"][0] - except KeyError: - raise ValueError( - "MoviePy couldn't find the codec associated " - "with the filename. Provide the 'codec' " - "parameter in write_videofile." - ) - - if audio_codec is None: - if ext in ["ogv", "webm"]: - audio_codec = "libvorbis" - else: - audio_codec = "libmp3lame" - elif audio_codec == "raw16": - audio_codec = "pcm_s16le" - elif audio_codec == "raw32": - audio_codec = "pcm_s32le" - - audiofile = audio if isinstance(audio, str) else None - make_audio = ( - (audiofile is None) and (audio is True) and (self.audio is not None) - ) - - if make_audio and temp_audiofile: - # The audio will be the clip's audio - audiofile = temp_audiofile - elif make_audio: - audio_ext = find_extension(audio_codec) - audiofile = os.path.join( - temp_audiofile_path, - name + Clip._TEMP_FILES_PREFIX + "wvf_snd.%s" % audio_ext, - ) - - # enough cpu for multiprocessing ? USELESS RIGHT NOW, WILL COME AGAIN - # enough_cpu = (multiprocessing.cpu_count() > 1) - logger(message="MoviePy - Building video %s." % filename) - if make_audio: - self.audio.write_audiofile( - audiofile, - audio_fps, - audio_nbytes, - audio_bufsize, - audio_codec, - bitrate=audio_bitrate, - write_logfile=write_logfile, - logger=logger, - ) - - ffmpeg_write_video( - self, - filename, - fps, - codec, - bitrate=bitrate, - preset=preset, - write_logfile=write_logfile, - audiofile=audiofile, - threads=threads, - ffmpeg_params=ffmpeg_params, - logger=logger, - pixel_format=pixel_format, - ) - - if remove_temp and make_audio: - if os.path.exists(audiofile): - os.remove(audiofile) - logger(message="MoviePy - video ready %s" % filename)
- -
[docs] @requires_duration - @use_clip_fps_by_default - @convert_masks_to_RGB - def write_images_sequence( - self, name_format, fps=None, with_mask=True, logger="bar" - ): - """Writes the videoclip to a sequence of image files. - - Parameters - ---------- - - name_format - A filename specifying the numerotation format and extension - of the pictures. For instance "frame%03d.png" for filenames - indexed with 3 digits and PNG format. Also possible: - "some_folder/frame%04d.jpeg", etc. - - fps - Number of frames per second to consider when writing the - clip. If not specified, the clip's ``fps`` attribute will - be used if it has one. - - with_mask - will save the clip's mask (if any) as an alpha canal (PNGs only). - - logger - Either ``"bar"`` for progress bar or ``None`` or any Proglog logger. - - - Returns - ------- - - names_list - A list of all the files generated. - - Notes - ----- - - The resulting image sequence can be read using e.g. the class - ``ImageSequenceClip``. - - """ - logger = proglog.default_bar_logger(logger) - # Fails on GitHub macos CI - # logger(message="MoviePy - Writing frames %s." % name_format) - - timings = np.arange(0, self.duration, 1.0 / fps) - - filenames = [] - for i, t in logger.iter_bar(t=list(enumerate(timings))): - name = name_format % i - filenames.append(name) - self.save_frame(name, t, with_mask=with_mask) - # logger(message="MoviePy - Done writing frames %s." % name_format) - - return filenames
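For example, dumping a clip to numbered PNG frames that ``ImageSequenceClip`` can later re-read; a short sketch (assumes the ``frames/`` directory exists):

>>> frame_files = clip.write_images_sequence("frames/frame%04d.png", fps=24)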
- -
[docs] @requires_duration - @convert_masks_to_RGB - @convert_path_to_string("filename") - def write_gif( - self, - filename, - fps=None, - loop=0, - logger="bar", - ): - """Write the VideoClip to a GIF file. - - Converts a VideoClip into an animated GIF using imageio - - Parameters - ---------- - - filename - Name of the resulting gif file, as a string or a path-like object. - - fps - Number of frames per second (see note below). If it - isn't provided, then the function will look for the clip's - ``fps`` attribute (VideoFileClip, for instance, have one). - - loop : int, optional - Repeat the clip using ``loop`` iterations in the resulting GIF. - - progress_bar - If True, displays a progress bar - - - Notes - ----- - - The gif will be playing the clip in real time (you can - only change the frame rate). If you want the gif to be played - slower than the clip you will use :: - - >>> # slow down clip 50% and make it a gif - >>> myClip.multiply_speed(0.5).to_gif('myClip.gif') - - """ - # A little sketchy at the moment, maybe move all that in write_gif, - # refactor a little... we will see. - - write_gif_with_imageio( - self, - filename, - fps=fps, - loop=loop, - logger=logger, - )
- - # =============================================================== - # PREVIEW OPERATIONS - -
[docs] @convert_masks_to_RGB - @convert_parameter_to_seconds(["t"]) - def show(self, t=0, with_mask=True): - """Splashes the frame of clip corresponding to time ``t``. - - Parameters - ---------- - - t : float or tuple or str, optional - Time in seconds of the frame to display. - - with_mask : bool, optional - ``False`` if the clip has a mask but you want to see the clip without - the mask. - - Examples - -------- - - >>> from moviepy import * - >>> - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.show(t=4) - """ - clip = self.copy() - - # Warning : Comment to fix a bug on preview for compositevideoclip - # it broke compositevideoclip and it does nothing on normal clip with alpha - - # if with_mask and (self.mask is not None): - # # Hate it, but cannot figure a better way with python awful circular - # # dependency - # from mpy.video.compositing.CompositeVideoClip import CompositeVideoClip - # clip = CompositeVideoClip([self.with_position((0, 0))]) - - frame = clip.get_frame(t) - pil_img = Image.fromarray(frame.astype("uint8")) - - pil_img.show()
- -
[docs] @requires_duration - @convert_masks_to_RGB - def preview( - self, fps=15, audio=True, audio_fps=22050, audio_buffersize=3000, audio_nbytes=2 - ): - """Displays the clip in a window, at the given frames per second. - - It will avoid that the clip be played faster than normal, but it - cannot avoid the clip to be played slower than normal if the computations - are complex. In this case, try reducing the ``fps``. - - Parameters - ---------- - - fps : int, optional - Number of frames per seconds in the displayed video. Default to ``15``. - - audio : bool, optional - ``True`` (default) if you want the clip's audio be played during - the preview. - - audio_fps : int, optional - The frames per second to use when generating the audio sound. - - audio_buffersize : int, optional - The sized of the buffer used generating the audio sound. - - audio_nbytes : int, optional - The number of bytes used generating the audio sound. - - Examples - -------- - - >>> from moviepy import * - >>> clip = VideoFileClip("media/chaplin.mp4") - >>> clip.preview(fps=10, audio=False) - """ - audio = audio and (self.audio is not None) - audio_flag = None - video_flag = None - - if audio: - # the sound will be played in parallel. We are not - # parralellizing it on different CPUs because it seems that - # ffplay use several cpus. - - # two synchro-flags to tell whether audio and video are ready - video_flag = threading.Event() - audio_flag = threading.Event() - # launch the thread - audiothread = threading.Thread( - target=self.audio.audiopreview, - args=( - audio_fps, - audio_buffersize, - audio_nbytes, - audio_flag, - video_flag, - ), - ) - audiothread.start() - - # passthrough to ffmpeg, passing flag for ffmpeg to set - ffplay_preview_video( - clip=self, fps=fps, audio_flag=audio_flag, video_flag=video_flag - )
- - # ----------------------------------------------------------------- - # F I L T E R I N G - -
[docs] def with_sub_effects( - self, effects: List["Effect"], start_time=0, end_time=None, **kwargs - ): - """Apply a transformation to a part of the clip. - - Returns a new clip in which the function ``fun`` (clip->clip) - has been applied to the subclip between times `start_time` and `end_time` - (in seconds). - - Examples - -------- - - >>> # The scene between times t=3s and t=6s in ``clip`` will be - >>> # be played twice slower in ``new_clip`` - >>> new_clip = clip.with_sub_effect(MultiplySpeed(0.5), 3, 6) - - """ - left = None if (start_time == 0) else self.with_subclip(0, start_time) - center = self.with_subclip(start_time, end_time).with_effects(effects, **kwargs) - right = None if (end_time is None) else self.with_subclip(start_time=end_time) - - clips = [clip for clip in [left, center, right] if clip is not None] - - # beurk, have to find other solution - from moviepy.video.compositing.CompositeVideoClip import concatenate_videoclips - - return concatenate_videoclips(clips).with_start(self.start)
- - # IMAGE FILTERS - -
[docs] def image_transform(self, image_func, apply_to=None): - """Modifies the images of a clip by replacing the frame `get_frame(t)` by - another frame, `image_func(get_frame(t))`. - """ - apply_to = apply_to or [] - return self.transform(lambda get_frame, t: image_func(get_frame(t)), apply_to)
- - # -------------------------------------------------------------- - # C O M P O S I T I N G - -
[docs]    def fill_array(self, pre_array, shape=(0, 0)):
-        """Fills an array to match the specified shape.
-
-        If the `pre_array` is smaller than the desired shape, the missing rows
-        or columns are added with ones to the bottom or right, respectively,
-        until the shape matches. If the `pre_array` is larger than the desired
-        shape, the excess rows or columns are cropped from the bottom or right,
-        respectively, until the shape matches.
-
-        The resulting array with the filled shape is returned.
-
-        Parameters
-        ----------
-
-        pre_array : numpy.ndarray
-          The original array to be filled.
-
-        shape : tuple
-          The desired shape of the resulting array.
-        """
-        pre_shape = pre_array.shape
-        dx = shape[0] - pre_shape[0]
-        dy = shape[1] - pre_shape[1]
-        post_array = pre_array
-        if dx < 0:
-            post_array = pre_array[: shape[0]]
-        elif dx > 0:
-            x_1 = [[[1, 1, 1]] * pre_shape[1]] * dx
-            post_array = np.vstack((pre_array, x_1))
-        if dy < 0:
-            post_array = post_array[:, : shape[1]]
-        elif dy > 0:
-            x_1 = [[[1, 1, 1]] * dy] * post_array.shape[0]
-            post_array = np.hstack((post_array, x_1))
-        return post_array
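A small self-contained sketch of the padding behaviour described above, using plain NumPy:

```python
import numpy as np

# A 2x2 RGB array (all zeros), padded out to 3 rows x 4 columns with ones,
# mirroring the vstack/hstack logic of fill_array.
small = np.zeros((2, 2, 3))

dx, dy = 3 - small.shape[0], 4 - small.shape[1]
padded = np.vstack((small, [[[1, 1, 1]] * small.shape[1]] * dx))    # add rows
padded = np.hstack((padded, [[[1, 1, 1]] * dy] * padded.shape[0]))  # add columns

print(padded.shape)  # (3, 4, 3)
```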
- -
[docs]    def blit_on(self, picture, t):
-        """Returns the result of the blit of the clip's frame at time `t`
-        on the given `picture`, the position of the clip being given
-        by the clip's ``pos`` attribute. Meant for compositing.
-        """
-        wf, hf = picture.size
-
-        ct = t - self.start  # clip time
-
-        # GET IMAGE AND MASK IF ANY
-        img = self.get_frame(ct).astype("uint8")
-        im_img = Image.fromarray(img)
-
-        if self.mask is not None:
-            mask = (self.mask.get_frame(ct) * 255).astype("uint8")
-            im_mask = Image.fromarray(mask).convert("L")
-
-            if im_img.size != im_mask.size:
-                bg_size = (
-                    max(im_img.size[0], im_mask.size[0]),
-                    max(im_img.size[1], im_mask.size[1]),
-                )
-
-                im_img_bg = Image.new("RGB", bg_size, "black")
-                im_img_bg.paste(im_img, (0, 0))
-
-                im_mask_bg = Image.new("L", bg_size, 0)
-                im_mask_bg.paste(im_mask, (0, 0))
-
-                im_img, im_mask = im_img_bg, im_mask_bg
-
-        else:
-            im_mask = None
-
-        wi, hi = im_img.size
-        # SET POSITION
-        pos = self.pos(ct)
-
-        # expand shorthand string positions into (x, y) pairs
-        if isinstance(pos, str):
-            pos = {
-                "center": ["center", "center"],
-                "left": ["left", "center"],
-                "right": ["right", "center"],
-                "top": ["center", "top"],
-                "bottom": ["center", "bottom"],
-            }[pos]
-        else:
-            pos = list(pos)
-
-        # is the position relative (given as a fraction of the frame's size)?
-        if self.relative_pos:
-            for i, dim in enumerate([wf, hf]):
-                if not isinstance(pos[i], str):
-                    pos[i] = dim * pos[i]
-
-        if isinstance(pos[0], str):
-            D = {"left": 0, "center": (wf - wi) / 2, "right": wf - wi}
-            pos[0] = D[pos[0]]
-
-        if isinstance(pos[1], str):
-            D = {"top": 0, "center": (hf - hi) / 2, "bottom": hf - hi}
-            pos[1] = D[pos[1]]
-
-        pos = map(int, pos)
-        return blit(im_img, picture, pos, mask=im_mask)
- -
[docs]    def with_add_mask(self):
-        """Add a mask VideoClip to the VideoClip.
-
-        Returns a copy of the clip with a completely opaque mask
-        (made of ones). This makes computations slower compared to
-        having a None mask, but can be useful in many cases.
-
-        If the clip does not have a constant size (``has_constant_size``
-        is ``False``), the mask is generated frame by frame so that it
-        follows the moving image size.
-        """
-        if self.has_constant_size:
-            mask = ColorClip(self.size, 1.0, is_mask=True)
-            return self.with_mask(mask.with_duration(self.duration))
-        else:
-
-            def make_frame(t):
-                return np.ones(self.get_frame(t).shape[:2], dtype=float)
-
-            mask = VideoClip(is_mask=True, make_frame=make_frame)
-            return self.with_mask(mask.with_duration(self.duration))
- -
[docs] def with_on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None): - """Place the clip on a colored background. - - Returns a clip made of the current clip overlaid on a color - clip of a possibly bigger size. Can serve to flatten transparent - clips. - - Parameters - ---------- - - size - Size (width, height) in pixels of the final clip. - By default it will be the size of the current clip. - - color - Background color of the final clip ([R,G,B]). - - pos - Position of the clip in the final clip. 'center' is the default - - col_opacity - Parameter in 0..1 indicating the opacity of the colored - background. - """ - from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip - - if size is None: - size = self.size - if pos is None: - pos = "center" - - if col_opacity is not None: - colorclip = ColorClip( - size, color=color, duration=self.duration - ).with_opacity(col_opacity) - result = CompositeVideoClip([colorclip, self.with_position(pos)]) - else: - result = CompositeVideoClip( - [self.with_position(pos)], size=size, bg_color=color - ) - - if ( - isinstance(self, ImageClip) - and (not hasattr(pos, "__call__")) - and ((self.mask is None) or isinstance(self.mask, ImageClip)) - ): - new_result = result.to_ImageClip() - if result.mask is not None: - new_result.mask = result.mask.to_ImageClip() - return new_result.with_duration(result.duration) - - return result
- -
[docs]    @outplace
-    def with_make_frame(self, mf):
-        """Change the clip's ``make_frame``.
-
-        Returns a copy of the VideoClip instance, with the make_frame
-        attribute set to ``mf``.
-        """
-        self.make_frame = mf
-        self.size = self.get_frame(0).shape[:2][::-1]
- -
[docs]    @outplace
-    def with_audio(self, audioclip):
-        """Attach an AudioClip to the VideoClip.
-
-        Returns a copy of the VideoClip instance, with the `audio`
-        attribute set to ``audioclip``, which must be an AudioClip instance.
-        """
-        self.audio = audioclip
- -
[docs] @outplace - def with_mask(self, mask): - """Set the clip's mask. - - Returns a copy of the VideoClip with the mask attribute set to - ``mask``, which must be a greyscale (values in 0-1) VideoClip. - """ - assert mask is None or mask.is_mask - self.mask = mask
- -
[docs]    @add_mask_if_none
-    @outplace
-    def with_opacity(self, opacity):
-        """Set the opacity/transparency level of the clip.
-
-        Returns a semi-transparent copy of the clip where the mask is
-        multiplied by ``opacity`` (any float, normally between 0 and 1).
-        """
-        self.mask = self.mask.image_transform(lambda pic: opacity * pic)
- -
[docs]    @apply_to_mask
-    @outplace
-    def with_position(self, pos, relative=False):
-        """Set the clip's position in compositions.
-
-        Sets the position that the clip will have when included
-        in compositions. The argument ``pos`` can be either a couple
-        ``(x,y)`` or a function ``t-> (x,y)``. `x` and `y` mark the
-        location of the top left corner of the clip, and can be
-        of several types.
-
-        Examples
-        --------
-
-        >>> clip.with_position((45,150))  # x=45, y=150
-        >>>
-        >>> # clip horizontally centered, at the top of the picture
-        >>> clip.with_position(("center","top"))
-        >>>
-        >>> # clip is at 40% of the width, 70% of the height:
-        >>> clip.with_position((0.4,0.7), relative=True)
-        >>>
-        >>> # clip's position is horizontally centered, and moving down!
-        >>> clip.with_position(lambda t: ('center', 50+t))
-
-        """
-        self.relative_pos = relative
-        if hasattr(pos, "__call__"):
-            self.pos = pos
-        else:
-            self.pos = lambda t: pos
- -
[docs] @apply_to_mask - @outplace - def with_layer(self, layer): - """Set the clip's layer in compositions. Clips with a greater ``layer`` - attribute will be displayed on top of others. - - Note: Only has effect when the clip is used in a CompositeVideoClip. - """ - self.layer = layer
- -
[docs] def resized(self, new_size=None, height=None, width=None, apply_to_mask=True): - """Returns a video clip that is a resized version of the clip. - For info on the parameters, please see ``vfx.Resize`` - """ - return self.with_effects( - [ - Resize( - new_size=new_size, - height=height, - width=width, - apply_to_mask=apply_to_mask, - ) - ] - )
- -
[docs]    def rotated(
-        self,
-        angle: float,
-        unit: str = "deg",
-        resample: str = "bicubic",
-        expand: bool = False,
-        center: tuple = None,
-        translate: tuple = None,
-        bg_color: tuple = None,
-    ):
-        """Rotates the specified clip by ``angle`` degrees (or radians)
-        anticlockwise. Rotation is fast for multiples of 90 degrees; if the
-        angle is not a multiple of 90, or if ``center``, ``translate`` or
-        ``bg_color`` is not ``None``, a slower PIL-based transform is used.
-        For info on the parameters, please see ``vfx.Rotate``
-        """
-        return self.with_effects(
-            [
-                Rotate(
-                    angle=angle,
-                    unit=unit,
-                    resample=resample,
-                    expand=expand,
-                    center=center,
-                    translate=translate,
-                    bg_color=bg_color,
-                )
-            ]
-        )
- -
[docs]    def cropped(
-        self,
-        x1: int = None,
-        y1: int = None,
-        x2: int = None,
-        y2: int = None,
-        width: int = None,
-        height: int = None,
-        x_center: int = None,
-        y_center: int = None,
-    ):
-        """Returns a new clip in which just a rectangular subregion of the
-        original clip is conserved. ``x1,y1`` indicates the top left corner
-        and ``x2,y2`` the lower right corner of the cropped region.
-        All coordinates are in pixels. Float numbers are accepted.
-        For info on the parameters, please see ``vfx.Crop``
-        """
-        return self.with_effects(
-            [
-                Crop(
-                    x1=x1,
-                    y1=y1,
-                    x2=x2,
-                    y2=y2,
-                    width=width,
-                    height=height,
-                    x_center=x_center,
-                    y_center=y_center,
-                )
-            ]
-        )
- - # -------------------------------------------------------------- - # CONVERSIONS TO OTHER TYPES - -
[docs] @convert_parameter_to_seconds(["t"]) - def to_ImageClip(self, t=0, with_mask=True, duration=None): - """ - Returns an ImageClip made out of the clip's frame at time ``t``, - which can be expressed in seconds (15.35), in (min, sec), - in (hour, min, sec), or as a string: '01:03:05.35'. - """ - new_clip = ImageClip(self.get_frame(t), is_mask=self.is_mask, duration=duration) - if with_mask and self.mask is not None: - new_clip.mask = self.mask.to_ImageClip(t) - return new_clip
- -
[docs]    def to_mask(self, canal=0):
-        """Return a mask video clip, built from channel ``canal`` of the clip."""
-        if self.is_mask:
-            return self
-        else:
-            new_clip = self.image_transform(lambda pic: 1.0 * pic[:, :, canal] / 255)
-            new_clip.is_mask = True
-            return new_clip
- -
[docs] def to_RGB(self): - """Return a non-mask video clip made from the mask video clip.""" - if self.is_mask: - new_clip = self.image_transform( - lambda pic: np.dstack(3 * [255 * pic]).astype("uint8") - ) - new_clip.is_mask = False - return new_clip - else: - return self
- - # ---------------------------------------------------------------- - # Audio - -
[docs] @outplace - def without_audio(self): - """Remove the clip's audio. - - Return a copy of the clip with audio set to None. - """ - self.audio = None
-
-    def __add__(self, other):
-        if isinstance(other, VideoClip):
-            from moviepy.video.compositing.CompositeVideoClip import (
-                concatenate_videoclips,
-            )
-
-            method = "chain" if self.size == other.size else "compose"
-            return concatenate_videoclips([self, other], method=method)
-        return super(VideoClip, self).__add__(other)
-
-    def __or__(self, other):
-        """
-        Implement the or (self | other) to produce a video with self and other
-        placed side by side horizontally.
-        """
-        if isinstance(other, VideoClip):
-            from moviepy.video.compositing.CompositeVideoClip import clips_array
-
-            return clips_array([[self, other]])
-        return super(VideoClip, self).__or__(other)
-
-    def __truediv__(self, other):
-        """
-        Implement division (self / other) to produce a video with self
-        placed on top of other.
-        """
-        if isinstance(other, VideoClip):
-            from moviepy.video.compositing.CompositeVideoClip import clips_array
-
-            return clips_array([[self], [other]])
-        return super(VideoClip, self).__truediv__(other)
-
-    def __matmul__(self, n):
-        """
-        Implement matrix multiplication (self @ other) to rotate a video
-        by ``other`` degrees.
-        """
-        if not isinstance(n, Real):
-            return NotImplemented
-
-        from moviepy.video.fx.Rotate import Rotate
-
-        return self.with_effects([Rotate(n)])
-
-    def __and__(self, mask):
-        """
-        Implement the and (self & other) to produce a video with other
-        used as a mask for self.
-        """
-        return self.with_mask(mask)
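A short sketch of the operator sugar defined above; the file names are assumptions:

```python
from moviepy import VideoFileClip

a = VideoFileClip("media/a.mp4")
b = VideoFileClip("media/b.mp4")

seq = a + b               # a then b (concatenation)
side = a | b              # a and b side by side
stack = a / b             # a on top of b
tilted = a @ 90           # a rotated by 90 degrees
masked = a & b.to_mask()  # b, converted to a mask, applied to a
```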
- - -
[docs]class DataVideoClip(VideoClip): - """ - Class of video clips whose successive frames are functions - of successive datasets - - Parameters - ---------- - data - A list of datasets, each dataset being used for one frame of the clip - - data_to_frame - A function d -> video frame, where d is one element of the list `data` - - fps - Number of frames per second in the animation - """ - - def __init__(self, data, data_to_frame, fps, is_mask=False, has_constant_size=True): - self.data = data - self.data_to_frame = data_to_frame - self.fps = fps - - def make_frame(t): - return self.data_to_frame(self.data[int(self.fps * t)]) - - VideoClip.__init__( - self, - make_frame, - is_mask=is_mask, - duration=1.0 * len(data) / fps, - has_constant_size=has_constant_size, - )
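A minimal sketch of driving a clip from a list of datasets, here one grayscale level per frame:

```python
import numpy as np

# Ten datasets: increasing brightness levels.
data = [25 * i for i in range(10)]

def data_to_frame(level):
    # Render one dataset as a 64x64 RGB frame.
    return np.full((64, 64, 3), level, dtype=np.uint8)

clip = DataVideoClip(data, data_to_frame, fps=5)  # 10 frames at 5 fps = 2 s
```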
- - -
[docs]class UpdatedVideoClip(VideoClip):
-    """
-    Class of clips whose make_frame requires some objects to
-    be updated. Particularly practical in science, where some
-    algorithm needs to make some steps before a new frame can
-    be generated.
-
-    UpdatedVideoClips have the following make_frame:
-
-    >>> def make_frame(t):
-    ...     while self.world.clip_t < t:
-    ...         world.update()  # updates, and increases world.clip_t
-    ...     return world.to_frame()
-
-    Parameters
-    ----------
-
-    world
-      An object with the following attributes:
-      - world.clip_t: the clip's time corresponding to the world's state.
-      - world.update() : update the world's state, (including increasing
-        world.clip_t by one time step).
-      - world.to_frame() : renders a frame depending on the world's state.
-
-    is_mask
-      True if the clip is a WxH mask with values in 0-1
-
-    duration
-      Duration of the clip, in seconds
-
-    """
-
-    def __init__(self, world, is_mask=False, duration=None):
-        self.world = world
-
-        def make_frame(t):
-            while self.world.clip_t < t:
-                world.update()
-            return world.to_frame()
-
-        VideoClip.__init__(
-            self, make_frame=make_frame, is_mask=is_mask, duration=duration
-        )
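A minimal sketch of a ``world`` object satisfying the protocol above, a toy simulation that brightens one step at a time:

```python
import numpy as np

class RampWorld:
    """Toy world: one brightness step per 0.1 s of clip time."""

    def __init__(self):
        self.clip_t = 0.0
        self.level = 0

    def update(self):
        # advance the simulation and the world's clock
        self.level = min(self.level + 5, 255)
        self.clip_t += 0.1

    def to_frame(self):
        return np.full((64, 64, 3), self.level, dtype=np.uint8)

clip = UpdatedVideoClip(RampWorld(), duration=3)
```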
- - -"""--------------------------------------------------------------------- - - ImageClip (base class for all 'static clips') and its subclasses - ColorClip and TextClip. - I would have liked to put these in a separate file but Python is bad - at cyclic imports. - ----------------------------------------------------------------------""" - - -
[docs]class ImageClip(VideoClip):
-    """Class for non-moving VideoClips.
-
-    A video clip originating from a picture. This clip will simply
-    display the given picture at all times.
-
-    Examples
-    --------
-
-    >>> clip = ImageClip("myHouse.jpeg")
-    >>> clip = ImageClip(someArray)  # a NumPy array representing an RGB image
-
-    Parameters
-    ----------
-
-    img
-      Any picture file (png, tiff, jpeg, etc.) as a string or a path-like object,
-      or any array representing an RGB image (for instance a frame from a VideoClip).
-
-    is_mask
-      Set this parameter to `True` if the clip is a mask.
-
-    transparent
-      Set this parameter to `True` (default) if you want the alpha layer
-      of the picture (if it exists) to be used as a mask.
-
-    Attributes
-    ----------
-
-    img
-      Array representing the image of the clip.
-
-    """
-
-    def __init__(
-        self, img, is_mask=False, transparent=True, fromalpha=False, duration=None
-    ):
-        VideoClip.__init__(self, is_mask=is_mask, duration=duration)
-
-        if not isinstance(img, np.ndarray):
-            # img is a string or path-like object, so read it in from disk
-            img = imread_v2(img)  # We use imread v2 because v3 fails on GIFs
-
-        if len(img.shape) == 3:  # img is (now) an RGB(A) numpy array
-            if img.shape[2] == 4:
-                if fromalpha:
-                    img = 1.0 * img[:, :, 3] / 255
-                elif is_mask:
-                    img = 1.0 * img[:, :, 0] / 255
-                elif transparent:
-                    self.mask = ImageClip(1.0 * img[:, :, 3] / 255, is_mask=True)
-                    img = img[:, :, :3]
-            elif is_mask:
-                img = 1.0 * img[:, :, 0] / 255
-
-        # if the image was just a 2D mask, it should arrive here unchanged
-        self.make_frame = lambda t: img
-        self.size = img.shape[:2][::-1]
-        self.img = img
[docs]    def transform(self, func, apply_to=None, keep_duration=True):
-        """General transformation filter.
-
-        Equivalent to VideoClip.transform. The result is no longer an
-        ImageClip but a VideoClip (since it may be animated).
-        """
-        if apply_to is None:
-            apply_to = []
-        # When we use transform on an image clip it may become animated.
-        # Therefore the result is not an ImageClip, just a VideoClip.
-        new_clip = VideoClip.transform(
-            self, func, apply_to=apply_to, keep_duration=keep_duration
-        )
-        new_clip.__class__ = VideoClip
-        return new_clip
- -
[docs] @outplace - def image_transform(self, image_func, apply_to=None): - """Image-transformation filter. - - Does the same as VideoClip.image_transform, but for ImageClip the - transformed clip is computed once and for all at the beginning, - and not for each 'frame'. - """ - if apply_to is None: - apply_to = [] - arr = image_func(self.get_frame(0)) - self.size = arr.shape[:2][::-1] - self.make_frame = lambda t: arr - self.img = arr - - for attr in apply_to: - a = getattr(self, attr, None) - if a is not None: - new_a = a.image_transform(image_func) - setattr(self, attr, new_a)
- -
[docs] @outplace - def time_transform(self, time_func, apply_to=None, keep_duration=False): - """Time-transformation filter. - - Applies a transformation to the clip's timeline - (see Clip.time_transform). - - This method does nothing for ImageClips (but it may affect their - masks or their audios). The result is still an ImageClip. - """ - if apply_to is None: - apply_to = ["mask", "audio"] - for attr in apply_to: - a = getattr(self, attr, None) - if a is not None: - new_a = a.time_transform(time_func) - setattr(self, attr, new_a)
- - -
[docs]class ColorClip(ImageClip):
-    """An ImageClip showing just one color.
-
-    Parameters
-    ----------
-
-    size
-      Size (width, height) in pixels of the clip.
-
-    color
-      If argument ``is_mask`` is False, ``color`` indicates
-      the color in RGB of the clip (default is black). If ``is_mask``
-      is True, ``color`` must be a float between 0 and 1 (default is 1).
-
-    is_mask
-      Set to ``True`` if the clip will be used as a mask.
-
-    """
-
-    def __init__(self, size, color=None, is_mask=False, duration=None):
-        w, h = size
-
-        if is_mask:
-            shape = (h, w)
-            if color is None:
-                color = 0
-            elif not np.isscalar(color):
-                raise Exception("Color has to be a scalar when mask is true")
-        else:
-            if color is None:
-                color = (0, 0, 0)
-            elif not hasattr(color, "__getitem__"):
-                raise Exception("Color has to contain RGB of the clip")
-            elif isinstance(color, str):
-                raise Exception(
-                    "Color cannot be string. Color has to contain RGB of the clip"
-                )
-            shape = (h, w, len(color))
-
-        super().__init__(
-            np.tile(color, w * h).reshape(shape), is_mask=is_mask, duration=duration
-        )
- - -
[docs]class TextClip(ImageClip):
-    """Class for autogenerated text clips.
-
-    Creates an ImageClip originating from a script-generated text image.
-
-    Parameters
-    ----------
-
-    font
-      Path to the font to use. Must be an OpenType font.
-      See ``TextClip.list('font')`` for the list of fonts you can use on
-      your computer.
-
-    text
-      A string of the text to write. Can be replaced by argument
-      ``filename``.
-
-    filename
-      The name of a file in which there is the text to write,
-      as a string or a path-like object.
-      Can be provided instead of argument ``text``.
-
-    font_size
-      Font size in points. Can be auto-set if method='caption',
-      or if method='label' and size is set.
-
-    size
-      Size of the picture in pixels. Can be auto-set if
-      method='label' and font_size is set, but mandatory if method='caption'.
-      The height can be None for caption if font_size is defined;
-      it will then be auto-determined.
-
-    margin
-      Margin to be added around the text, as a tuple of two (symmetrical) or
-      four (asymmetrical) values. Either ``(horizontal, vertical)`` or
-      ``(left, top, right, bottom)``. By default no margin (None, None).
-      This is especially useful for auto-computed sizes, to give the text
-      some extra room.
-
-    bg_color
-      Color of the background. Defaults to None for no background. Can be
-      an RGB (or RGBA if transparent = ``True``) ``tuple``, a color name, or a
-      hexadecimal notation.
-
-    color
-      Color of the text. Defaults to "black". Can be
-      an RGB (or RGBA if transparent = ``True``) ``tuple``, a color name, or a
-      hexadecimal notation.
-
-    stroke_color
-      Color of the stroke (=contour line) of the text. If ``None``,
-      there will be no stroke.
-
-    stroke_width
-      Width of the stroke, in pixels. Can be a float, like 1.5.
-
-    method
-      Either 'label' (default, the picture will be autosized so as to fit
-      the text exactly) or 'caption' (the text will be drawn in a picture
-      with a fixed size provided with the ``size`` argument). If `caption`,
-      the text will be wrapped automagically.
-
-    text_align
-      center | left | right. Text alignment similar to css. Defaults to ``left``.
-
-    horizontal_align
-      center | left | right. Defines the horizontal alignment of the text
-      block in the image. Defaults to ``center``.
-
-    vertical_align
-      center | top | bottom. Defines the vertical alignment of the text
-      block in the image. Defaults to ``center``.
-
-    interline
-      Interline spacing. Defaults to ``4``.
-
-    transparent
-      ``True`` (default) if you want to take into account the
-      transparency in the image.
-
-    duration
-      Duration of the clip
-    """
-
-    @convert_path_to_string("filename")
-    def __init__(
-        self,
-        font,
-        text=None,
-        filename=None,
-        font_size=None,
-        size=(None, None),
-        margin=(None, None),
-        color="black",
-        bg_color=None,
-        stroke_color=None,
-        stroke_width=0,
-        method="label",
-        text_align="left",
-        horizontal_align="center",
-        vertical_align="center",
-        interline=4,
-        transparent=True,
-        duration=None,
-    ):
-        def break_text(
-            width, text, font, font_size, stroke_width, align, spacing
-        ) -> List[str]:
-            """Break text to never overflow a width"""
-            img = Image.new("RGB", (1, 1))
-            font_pil = ImageFont.truetype(font, font_size)
-            draw = ImageDraw.Draw(img)
-
-            lines = []
-            current_line = ""
-            words = text.split(" ")
-            for word in words:
-                temp_line = current_line + " " + word if current_line else word
-                temp_left, temp_top, temp_right, temp_bottom = draw.multiline_textbbox(
-                    (0, 0),
-                    temp_line,
-                    font=font_pil,
-                    spacing=spacing,
-                    align=align,
-                    stroke_width=stroke_width,
-                )
-                temp_width = temp_right - temp_left
-
-                if temp_width <= width:
-                    current_line = temp_line
-                else:
-                    lines.append(current_line)
-                    current_line = word
-
-            if current_line:
-                lines.append(current_line)
-
-            return lines
-
-        def find_text_size(
-            text,
-            font,
-            font_size,
-            stroke_width,
-            align,
-            spacing,
-            max_width=None,
-            allow_break=False,
-        ) -> tuple[int, int]:
-            """Find the dimensions a text will occupy; return a tuple (width, height)."""
-            img = Image.new("RGB", (1, 1))
-            font_pil = ImageFont.truetype(font, font_size)
-            draw = ImageDraw.Draw(img)
-
-            if max_width is None or not allow_break:
-                left, top, right, bottom = draw.multiline_textbbox(
-                    (0, 0),
-                    text,
-                    font=font_pil,
-                    spacing=spacing,
-                    align=align,
-                    stroke_width=stroke_width,
-                    anchor="lm",
-                )
-
-                return (int(right - left), int(bottom - top))
-
-            lines = break_text(
-                width=max_width,
-                text=text,
-                font=font,
-                font_size=font_size,
-                stroke_width=stroke_width,
-                align=align,
-                spacing=spacing,
-            )
-
-            left, top, right, bottom = draw.multiline_textbbox(
-                (0, 0),
-                "\n".join(lines),
-                font=font_pil,
-                spacing=spacing,
-                align=align,
-                stroke_width=stroke_width,
-                anchor="lm",
-            )
-
-            return (int(right - left), int(bottom - top))
-
-        def find_optimum_font_size(
-            text,
-            font,
-            stroke_width,
-            align,
-            spacing,
-            width,
-            height=None,
-            allow_break=False,
-        ):
-            """Find the best font size to fit the text as optimally as possible."""
-            max_font_size = width
-            min_font_size = 1
-
-            # Try to find the best size using bisection
-            while min_font_size < max_font_size:
-                avg_font_size = int((max_font_size + min_font_size) // 2)
-                text_width, text_height = find_text_size(
-                    text,
-                    font,
-                    avg_font_size,
-                    stroke_width,
-                    align,
-                    spacing,
-                    max_width=width,
-                    allow_break=allow_break,
-                )
-
-                if text_width <= width and (height is None or text_height <= height):
-                    min_font_size = avg_font_size + 1
-                else:
-                    max_font_size = avg_font_size - 1
-
-            # Check if the last font size tested fits within the given width and height
-            text_width, text_height = find_text_size(
-                text,
-                font,
-                min_font_size,
-                stroke_width,
-                align,
-                spacing,
-                max_width=width,
-                allow_break=allow_break,
-            )
-            if text_width <= width and (height is None or text_height <= height):
-                return min_font_size
-            else:
-                return min_font_size - 1
-
-        try:
-            _ = ImageFont.truetype(font)
-        except Exception as e:
-            raise ValueError(
-                "Invalid font {}, pillow failed to use it with error {}".format(font, e)
-            )
-
-        if filename:
-            with open(filename, "r") as file:
-                text = file.read().rstrip()  # Remove the trailing newline
-
-        if text is None:
-            raise ValueError("No text nor filename provided")
-
-        # Compute all img and text sizes if some are missing
-        img_width, img_height = size
-
-        if method == "caption":
-            if img_width is None:
-                raise ValueError("Size is mandatory when method is caption")
-
-            if img_height is None and font_size is None:
-                raise ValueError(
-                    "Height is mandatory when method is caption and font size is None"
-                )
-
-            if font_size is None:
-                font_size = find_optimum_font_size(
-                    text=text,
-                    font=font,
-                    stroke_width=stroke_width,
-                    align=text_align,
-                    spacing=interline,
-                    width=img_width,
-                    height=img_height,
-                    allow_break=True,
-                )
-
-            if img_height is None:
-                img_height = find_text_size(
-                    text=text,
-                    font=font,
-                    font_size=font_size,
-                    stroke_width=stroke_width,
-                    align=text_align,
-                    spacing=interline,
-                    max_width=img_width,
-                    allow_break=True,
-                )[1]
-
-            # Add line breaks whenever needed
-            text = "\n".join(
-                break_text(
-                    width=img_width,
-                    text=text,
-                    font=font,
-                    font_size=font_size,
-                    stroke_width=stroke_width,
-                    align=text_align,
-                    spacing=interline,
-                )
-            )
-
-        elif method == "label":
-            if font_size is None and img_width is None:
-                raise ValueError(
-                    "Font size is mandatory when method is label and size is None"
-                )
-
-            if font_size is None:
-                font_size = find_optimum_font_size(
-                    text=text,
-                    font=font,
-                    stroke_width=stroke_width,
-                    align=text_align,
-                    spacing=interline,
-                    width=img_width,
-                    height=img_height,
-                )
-
-            if img_width is None:
-                img_width = find_text_size(
-                    text=text,
-                    font=font,
-                    font_size=font_size,
-                    stroke_width=stroke_width,
-                    align=text_align,
-                    spacing=interline,
-                )[0]
-
-            if img_height is None:
-                img_height = find_text_size(
-                    text=text,
-                    font=font,
-                    font_size=font_size,
-                    stroke_width=stroke_width,
-                    align=text_align,
-                    spacing=interline,
-                    max_width=img_width,
-                )[1]
-
-        else:
-            raise ValueError("Method must be either `caption` or `label`.")
-
-        # Compute the margin and apply it
-        if len(margin) == 2:
-            left_margin = right_margin = int(margin[0] or 0)
-            top_margin = bottom_margin = int(margin[1] or 0)
-        elif len(margin) == 4:
-            left_margin = int(margin[0] or 0)
-            top_margin = int(margin[1] or 0)
-            right_margin = int(margin[2] or 0)
-            bottom_margin = int(margin[3] or 0)
-        else:
-            raise ValueError("Margin must be a tuple of either 2 or 4 elements.")
-
-        img_width += left_margin + right_margin
-        img_height += top_margin + bottom_margin
-
-        # Trace the image
-        img_mode = "RGBA" if transparent else "RGB"
-
-        if bg_color is None and transparent:
-            bg_color = (0, 0, 0, 0)
-
-        img = Image.new(img_mode, (img_width, img_height), color=bg_color)
-        pil_font = ImageFont.truetype(font, font_size)
-        draw = ImageDraw.Draw(img)
-
-        # No allow_break needed here: for method='caption' the text has
-        # already been broken into lines above.
-        text_width, text_height = find_text_size(
-            text=text,
-            font=font,
-            font_size=font_size,
-            stroke_width=stroke_width,
-            align=text_align,
-            spacing=interline,
-            max_width=img_width,
-        )
-
-        x = 0
-        if horizontal_align == "right":
-            x = img_width - text_width - left_margin - right_margin
-        elif horizontal_align == "center":
-            x = (img_width - left_margin - right_margin - text_width) / 2
-
-        x += left_margin
-
-        y = 0
-        if vertical_align == "bottom":
-            y = img_height - text_height - top_margin - bottom_margin
-        elif vertical_align == "center":
-            y = (img_height - top_margin - bottom_margin - text_height) / 2
-
-        y += top_margin
-
-        # Pillow's multiline support is limited: multiline_text and
-        # multiline_textbbox cannot use "left top" as the anchor (see
-        # https://pillow.readthedocs.io/en/stable/handbook/text-anchors.html),
-        # so we always use "left middle" instead. Otherwise we would always
-        # get a useless margin (the difference between ascender and top) on
-        # any text. That means our y does not start at 0 for the top; it must
-        # be incremented by half the text height, since we reference from the
-        # middle line.
-        y += text_height / 2
-
-        draw.multiline_text(
-            xy=(x, y),
-            text=text,
-            fill=color,
-            font=pil_font,
-            spacing=interline,
-            align=text_align,
-            stroke_width=stroke_width,
-            stroke_fill=stroke_color,
-            anchor="lm",
-        )
-
-        # We just need the image as a numpy array
-        img_numpy = np.array(img)
-
-        ImageClip.__init__(
-            self, img=img_numpy, transparent=transparent, duration=duration
-        )
-        self.text = text
-        self.color = color
-        self.stroke_color = stroke_color
- - -
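A hedged usage sketch of the sizing logic above; the font path is an assumption (any TrueType/OpenType file works):

```python
from moviepy import TextClip

# method="caption": a fixed width is given, the text is wrapped by
# break_text and the height is auto-computed by find_text_size.
txt = TextClip(
    font="./fonts/DejaVuSans.ttf",  # assumed path to a .ttf/.otf file
    text="MoviePy makes video editing scriptable",
    method="caption",
    size=(640, None),  # fixed width, height auto-determined
    font_size=40,
    color="white",
).with_duration(5)
```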
[docs]class BitmapClip(VideoClip):
-    """Clip made of color bitmaps. Mainly designed for testing purposes."""
-
-    DEFAULT_COLOR_DICT = {
-        "R": (255, 0, 0),
-        "G": (0, 255, 0),
-        "B": (0, 0, 255),
-        "O": (0, 0, 0),
-        "W": (255, 255, 255),
-        "A": (89, 225, 62),
-        "C": (113, 157, 108),
-        "D": (215, 182, 143),
-        "E": (57, 26, 252),
-        "F": (225, 135, 33),
-    }
-
-    @convert_parameter_to_seconds(["duration"])
-    def __init__(
-        self, bitmap_frames, *, fps=None, duration=None, color_dict=None, is_mask=False
-    ):
-        """Creates a VideoClip object from a bitmap representation. Primarily used
-        in the test suite.
-
-        Parameters
-        ----------
-
-        bitmap_frames
-          A list of frames. Each frame is a list of strings. Each string
-          represents a row of colors. Each character represents an (r, g, b)
-          tuple. Example input (2 frames, 5x3 pixel size)::
-
-              [["RRRRR",
-                "RRBRR",
-                "RRBRR"],
-               ["RGGGR",
-                "RGGGR",
-                "RGGGR"]]
-
-        fps
-          The number of frames per second to display the clip at. `duration` will
-          be calculated from the total number of frames. If both `fps` and
-          `duration` are set, `duration` will be ignored.
-
-        duration
-          The total duration of the clip. `fps` will be calculated from the total
-          number of frames. If both `fps` and `duration` are set, `duration` will
-          be ignored.
-
-        color_dict
-          A dictionary that can be used to set specific (r, g, b) values that
-          correspond to the letters used in ``bitmap_frames``,
-          e.g. ``{"A": (50, 150, 150)}``.
-
-          Defaults to::
-
-              {
-                  "R": (255, 0, 0),
-                  "G": (0, 255, 0),
-                  "B": (0, 0, 255),
-                  "O": (0, 0, 0),  # "O" represents black
-                  "W": (255, 255, 255),
-                  # "A", "C", "D", "E", "F" represent arbitrary colors
-                  "A": (89, 225, 62),
-                  "C": (113, 157, 108),
-                  "D": (215, 182, 143),
-                  "E": (57, 26, 252),
-                  "F": (225, 135, 33),
-              }
-
-        is_mask
-          Set to ``True`` if the clip is going to be used as a mask.
-        """
-        assert fps is not None or duration is not None
-
-        self.color_dict = color_dict if color_dict else self.DEFAULT_COLOR_DICT
-
-        frame_list = []
-        for input_frame in bitmap_frames:
-            output_frame = []
-            for row in input_frame:
-                output_frame.append([self.color_dict[color] for color in row])
-            frame_list.append(np.array(output_frame))
-
-        frame_array = np.array(frame_list)
-        self.total_frames = len(frame_array)
-
-        if fps is None:
-            fps = self.total_frames / duration
-        else:
-            duration = self.total_frames / fps
-
-        VideoClip.__init__(
-            self,
-            make_frame=lambda t: frame_array[int(t * fps)],
-            is_mask=is_mask,
-            duration=duration,
-        )
-        self.fps = fps
[docs] def to_bitmap(self, color_dict=None): - """Returns a valid bitmap list that represents each frame of the clip. - If `color_dict` is not specified, then it will use the same `color_dict` - that was used to create the clip. - """ - color_dict = color_dict or self.color_dict - - bitmap = [] - for frame in self.iter_frames(): - bitmap.append([]) - for line in frame: - bitmap[-1].append("") - for pixel in line: - letter = list(color_dict.keys())[ - list(color_dict.values()).index(tuple(pixel)) - ] - bitmap[-1][-1] += letter - - return bitmap
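A small round-trip sketch of the bitmap representation described above:

```python
frames = [
    ["RR", "GG"],  # frame 1: red row above a green row
    ["BB", "WW"],  # frame 2: blue row above a white row
]

clip = BitmapClip(frames, fps=1)   # 2 frames at 1 fps -> duration 2 s
assert clip.to_bitmap() == frames  # the representation survives a round trip
```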
-
\ No newline at end of file
diff --git a/_modules/moviepy/video/compositing/CompositeVideoClip.html b/_modules/moviepy/video/compositing/CompositeVideoClip.html
deleted file mode 100644
index c16e92153..000000000
--- a/_modules/moviepy/video/compositing/CompositeVideoClip.html
+++ /dev/null
@@ -1,775 +0,0 @@
Source code for moviepy.video.compositing.CompositeVideoClip
-"""Main video composition interface of MoviePy."""
-
-from functools import reduce
-
-import numpy as np
-from PIL import Image
-
-from moviepy.audio.AudioClip import CompositeAudioClip
-from moviepy.video.VideoClip import ColorClip, VideoClip
-
-
-
[docs]class CompositeVideoClip(VideoClip):
-    """
-    A VideoClip made of other videoclips displayed together. This is the
-    base class for most compositions.
-
-    Parameters
-    ----------
-
-    size
-      The size (width, height) of the final clip.
-
-    clips
-      A list of videoclips.
-
-      Clips with a higher ``layer`` attribute will be displayed
-      on top of clips with a lower one.
-      If two or more clips share the same ``layer``, then the one
-      appearing latest in ``clips`` is displayed on top (i.e. it is
-      treated as being on a higher layer).
-
-      For each clip:
-
-      - The attribute ``pos`` determines where the clip is placed.
-        See ``VideoClip.with_position``
-      - The mask of the clip determines which parts are visible.
-
-      Finally, if all the clips in the list have their ``duration``
-      attribute set, then the duration of the composite video clip
-      is computed automatically.
-
-    bg_color
-      Color for the unmasked and unfilled regions. Set to None for these
-      regions to be transparent (will be slower).
-
-    use_bgclip
-      Set to True if the first clip in the list should be used as the
-      'background' on which all other clips are blitted. That first clip must
-      have the same size as the final clip. If it has no transparency, the final
-      clip will have no mask.
-
-    The FPS of the composite clip is the highest FPS among the clips.
-
-    """
-
-    def __init__(
-        self, clips, size=None, bg_color=None, use_bgclip=False, is_mask=False
-    ):
-        if size is None:
-            size = clips[0].size
-
-        if use_bgclip and (clips[0].mask is None):
-            transparent = False
-        else:
-            transparent = bg_color is None
-
-        if bg_color is None:
-            bg_color = 0.0 if is_mask else (0, 0, 0)
-
-        fpss = [clip.fps for clip in clips if getattr(clip, "fps", None)]
-        self.fps = max(fpss) if fpss else None
-
-        VideoClip.__init__(self)
-
-        self.size = size
-        self.is_mask = is_mask
-        self.clips = clips
-        self.bg_color = bg_color
-
-        if use_bgclip:
-            self.bg = clips[0]
-            self.clips = clips[1:]
-            self.created_bg = False
-        else:
-            self.clips = clips
-            self.bg = ColorClip(size, color=self.bg_color, is_mask=is_mask)
-            self.created_bg = True
-
-        # order self.clips by layer
-        self.clips = sorted(self.clips, key=lambda clip: clip.layer)
-
-        # compute duration
-        ends = [clip.end for clip in self.clips]
-        if None not in ends:
-            duration = max(ends)
-            self.duration = duration
-            self.end = duration
-
-        # compute audio
-        audioclips = [v.audio for v in self.clips if v.audio is not None]
-        if audioclips:
-            self.audio = CompositeAudioClip(audioclips)
-
-        # compute mask if necessary
-        if transparent:
-            maskclips = [
-                (clip.mask if (clip.mask is not None) else clip.with_add_mask().mask)
-                .with_position(clip.pos)
-                .with_end(clip.end)
-                .with_start(clip.start, change_end=False)
-                .with_layer(clip.layer)
-                for clip in self.clips
-            ]
-
-            self.mask = CompositeVideoClip(
-                maskclips, self.size, is_mask=True, bg_color=0.0
-            )
[docs]    def make_frame(self, t):
-        """The clips playing at time `t` are blitted over one another."""
-        frame = self.bg.get_frame(t).astype("uint8")
-        im = Image.fromarray(frame)
-
-        if self.bg.mask is not None:
-            frame_mask = self.bg.mask.get_frame(t)
-            im_mask = Image.fromarray((255 * frame_mask).astype("uint8")).convert("L")
-            # Image.putalpha works in place and returns None
-            im.putalpha(im_mask)
-
-        for clip in self.playing_clips(t):
-            im = clip.blit_on(im, t)
-
-        return np.array(im)
- -
[docs] def playing_clips(self, t=0): - """Returns a list of the clips in the composite clips that are - actually playing at the given time `t`. - """ - return [clip for clip in self.clips if clip.is_playing(t)]
- -
[docs] def close(self): - """Closes the instance, releasing all the resources.""" - if self.created_bg and self.bg: - # Only close the background clip if it was locally created. - # Otherwise, it remains the job of whoever created it. - self.bg.close() - self.bg = None - if hasattr(self, "audio") and self.audio: - self.audio.close() - self.audio = None
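A usage sketch of the compositing described above, with layers and positions; the file names are assumptions:

```python
from moviepy import VideoFileClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip

bg = VideoFileClip("media/background.mp4")
fg = VideoFileClip("media/overlay.mp4").resized(0.3)

comp = CompositeVideoClip(
    [bg, fg.with_position(("right", "bottom")).with_layer(1)],
    use_bgclip=True,  # the first clip is the canvas; its size is kept
)
```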
- - -
[docs]def clips_array(array, rows_widths=None, cols_heights=None, bg_color=None):
-    """Given a matrix whose rows are clips, creates a CompositeVideoClip where
-    the clips of each row are placed side by side horizontally, and the rows
-    are stacked one on top of the other. So, given the following matrix of
-    clips with the same size:
-
-    ```python
-    clips_array([[clip1, clip2, clip3], [clip4, clip5, clip6]])
-    ```
-
-    the result will be a CompositeVideoClip with a layout displayed like:
-
-    ```
-    ┏━━━━━━━┳━━━━━━━┳━━━━━━━┓
-    ┃       ┃       ┃       ┃
-    ┃ clip1 ┃ clip2 ┃ clip3 ┃
-    ┃       ┃       ┃       ┃
-    ┣━━━━━━━╋━━━━━━━╋━━━━━━━┫
-    ┃       ┃       ┃       ┃
-    ┃ clip4 ┃ clip5 ┃ clip6 ┃
-    ┃       ┃       ┃       ┃
-    ┗━━━━━━━┻━━━━━━━┻━━━━━━━┛
-    ```
-
-    If a clip doesn't fill the space required by the row or column it is
-    placed in, that space is filled by the color defined in ``bg_color``.
-
-    array
-      Matrix of clips included in the returned composited video clip.
-
-    rows_widths
-      Heights of the different rows, in pixels. If ``None``, set automatically.
-
-    cols_heights
-      Widths of the different columns, in pixels. If ``None``, set automatically.
-
-    bg_color
-      Fill color for the masked and unfilled regions. Set to ``None`` for these
-      regions to be transparent (processing will be slower).
-    """
-    array = np.array(array)
-    sizes_array = np.array([[clip.size for clip in line] for line in array])
-
-    # find row heights and column widths automatically if not provided
-    if rows_widths is None:
-        rows_widths = sizes_array[:, :, 1].max(axis=1)
-    if cols_heights is None:
-        cols_heights = sizes_array[:, :, 0].max(axis=0)
-
-    # compute start positions: X for columns and Y for rows
-    xs = np.cumsum([0] + list(cols_heights))
-    ys = np.cumsum([0] + list(rows_widths))
-
-    for j, (x, ch) in enumerate(zip(xs[:-1], cols_heights)):
-        for i, (y, rw) in enumerate(zip(ys[:-1], rows_widths)):
-            clip = array[i, j]
-            w, h = clip.size
-            # if the clip doesn't fill its cell, center it on a background
-            if (w < ch) or (h < rw):
-                clip = CompositeVideoClip(
-                    [clip.with_position("center")], size=(ch, rw), bg_color=bg_color
-                ).with_duration(clip.duration)
-
-            array[i, j] = clip.with_position((x, y))
-
-    return CompositeVideoClip(array.flatten(), size=(xs[-1], ys[-1]), bg_color=bg_color)
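A usage sketch of the grid layout above; the file names are assumptions:

```python
from moviepy import VideoFileClip
from moviepy.video.compositing.CompositeVideoClip import clips_array

a = VideoFileClip("media/a.mp4")
b = VideoFileClip("media/b.mp4")

# 2x2 grid: a | b on the first row, b | a on the second.
grid = clips_array([[a, b], [b, a]])
```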
- - -
[docs]def concatenate_videoclips(
-    clips, method="chain", transition=None, bg_color=None, is_mask=False, padding=0
-):
-    """Concatenates several video clips.
-
-    Returns a video clip made by concatenating several video clips
-    (concatenated means that they will be played one after another).
-
-    There are two methods:
-
-    - method="chain": will produce a clip that simply outputs
-      the frames of the successive clips, without any correction if they are
-      not of the same size. If none of the clips have masks the
-      resulting clip has no mask, else the mask is a concatenation of masks
-      (using completely opaque masks for clips that don't have one).
-      If you have clips of different sizes and you want to write the result
-      of the concatenation directly to a file, use the method "compose" instead.
-
-    - method="compose": if the clips do not have the same resolution, the final
-      resolution will be such that no clip has to be resized.
-      As a consequence the final clip has the height of the tallest clip and the
-      width of the widest clip of the list. All the clips with smaller dimensions
-      will appear centered. The border will be transparent if mask=True, else it
-      will be of the color specified by ``bg_color``.
-
-    The FPS of the result clip is the highest FPS among the clips.
-
-    Parameters
-    ----------
-    clips
-      A list of video clips which must all have their ``duration``
-      attributes set.
-    method
-      "chain" or "compose": see above.
-    transition
-      A clip that will be played between each two clips of the list.
-
-    bg_color
-      Only for method='compose'. Color of the background.
-      Set to None for a transparent clip.
-
-    padding
-      Only for method='compose'. Duration between two consecutive clips.
-      Note that for negative padding, a clip will partly play at the same
-      time as the clip it follows (negative padding is handy for clips that
-      fade into one another). A non-zero padding automatically sets the
-      method to `compose`.
- - """ - if transition is not None: - clip_transition_pairs = [[v, transition] for v in clips[:-1]] - clips = reduce(lambda x, y: x + y, clip_transition_pairs) + [clips[-1]] - transition = None - - timings = np.cumsum([0] + [clip.duration for clip in clips]) - - sizes = [clip.size for clip in clips] - - w = max(size[0] for size in sizes) - h = max(size[1] for size in sizes) - - timings = np.maximum(0, timings + padding * np.arange(len(timings))) - timings[-1] -= padding # Last element is the duration of the whole - - if method == "chain": - - def make_frame(t): - i = max([i for i, e in enumerate(timings) if e <= t]) - return clips[i].get_frame(t - timings[i]) - - def get_mask(clip): - mask = clip.mask or ColorClip([1, 1], color=1, is_mask=True) - if mask.duration is None: - mask.duration = clip.duration - return mask - - result = VideoClip(is_mask=is_mask, make_frame=make_frame) - if any([clip.mask is not None for clip in clips]): - masks = [get_mask(clip) for clip in clips] - result.mask = concatenate_videoclips(masks, method="chain", is_mask=True) - result.clips = clips - elif method == "compose": - result = CompositeVideoClip( - [ - clip.with_start(t).with_position("center") - for (clip, t) in zip(clips, timings) - ], - size=(w, h), - bg_color=bg_color, - is_mask=is_mask, - ) - else: - raise Exception( - "MoviePy Error: The 'method' argument of " - "concatenate_videoclips must be 'chain' or 'compose'" - ) - - result.timings = timings - - result.start_times = timings[:-1] - result.start, result.duration, result.end = 0, timings[-1], timings[-1] - - audio_t = [ - (clip.audio, t) for clip, t in zip(clips, timings) if clip.audio is not None - ] - if audio_t: - result.audio = CompositeAudioClip([a.with_start(t) for a, t in audio_t]) - - fpss = [clip.fps for clip in clips if getattr(clip, "fps", None) is not None] - result.fps = max(fpss) if fpss else None - return result
-
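A usage sketch of the two concatenation methods; the file names are assumptions:

```python
from moviepy import VideoFileClip
from moviepy.video.compositing.CompositeVideoClip import concatenate_videoclips

a = VideoFileClip("media/a.mp4")
b = VideoFileClip("media/b.mp4")

# "chain" plays the frames back to back; "compose" centers
# differently-sized clips on a canvas sized to the largest clip.
final = concatenate_videoclips([a, b], method="compose")
final.write_videofile("out.mp4")
```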
\ No newline at end of file
diff --git a/_modules/moviepy/video/fx/AccelDecel.html b/_modules/moviepy/video/fx/AccelDecel.html
deleted file mode 100644
index a68ddcb8a..000000000
--- a/_modules/moviepy/video/fx/AccelDecel.html
+++ /dev/null
@@ -1,527 +0,0 @@
Source code for moviepy.video.fx.AccelDecel
-from dataclasses import dataclass
-
-from moviepy.Effect import Effect
-
-
-
[docs]@dataclass
-class AccelDecel(Effect):
-    """Accelerates and decelerates a clip, useful for GIF making.
-
-    Parameters
-    ----------
-
-    new_duration : float
-      Duration for the new transformed clip. If None, will be that of the
-      current clip.
-
-    abruptness : float
-      Slope shape in the acceleration-deceleration function. It will depend
-      on the value of the parameter:
-
-      * ``-1 < abruptness < 0``: speed up, down, up.
-      * ``abruptness == 0``: no effect.
-      * ``abruptness > 0``: speed down, up, down.
-
-    soonness : float
-      For positive abruptness, determines how soon the transformation occurs.
-      Should be a positive number.
-
-    Raises
-    ------
-
-    ValueError
-      When the ``soonness`` argument is lower than 0.
-
-    Examples
-    --------
-
-    The following graphs show functions generated by different combinations
-    of arguments, where the value of the slopes represents the speed of the
-    videos generated, the linear function (in red) being a combination that
-    does not produce any transformation.
-
-    .. image:: /_static/medias/accel_decel-fx-params.png
-      :alt: accel_decel FX parameters combinations
-    """
-
-    new_duration: float = None
-    abruptness: float = 1.0
-    soonness: float = 1.0
-
-    def _f_accel_decel(
-        self, t, old_duration, new_duration, abruptness=1.0, soonness=1.0
-    ):
-        a = 1.0 + abruptness
-
-        def _f(t):
-            def f1(t):
-                return (0.5) ** (1 - a) * (t**a)
-
-            def f2(t):
-                return 1 - f1(1 - t)
-
-            return (t < 0.5) * f1(t) + (t >= 0.5) * f2(t)
-
-        return old_duration * _f((t / new_duration) ** soonness)
[docs]    def apply(self, clip):
-        """Apply the effect to the clip."""
-        if self.new_duration is None:
-            self.new_duration = clip.duration
-
-        if self.soonness < 0:
-            raise ValueError("'soonness' should be a positive number")
-
-        return clip.time_transform(
-            lambda t: self._f_accel_decel(
-                t=t,
-                old_duration=clip.duration,
-                new_duration=self.new_duration,
-                abruptness=self.abruptness,
-                soonness=self.soonness,
-            )
-        ).with_duration(self.new_duration)
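A usage sketch of the effect; the input path is an assumption:

```python
from moviepy import VideoFileClip
from moviepy.video.fx.AccelDecel import AccelDecel

clip = VideoFileClip("media/chaplin.mp4")

# Speed down, then up, then down again, keeping the original duration.
wobbly = clip.with_effects([AccelDecel(abruptness=1.0, soonness=1.5)])
```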
-
\ No newline at end of file
diff --git a/_modules/moviepy/video/fx/BlackAndWhite.html b/_modules/moviepy/video/fx/BlackAndWhite.html
deleted file mode 100644
index bc36740f6..000000000
--- a/_modules/moviepy/video/fx/BlackAndWhite.html
+++ /dev/null
@@ -1,482 +0,0 @@
Source code for moviepy.video.fx.BlackAndWhite
-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Effect import Effect
-
-
-
[docs]@dataclass
-class BlackAndWhite(Effect):
-    """Desaturates the picture, making it black and white.
-    The ``RGB`` parameter sets the weights of the different color channels.
-    If ``RGB`` is ``'CRT_phosphor'``, a special set of values is used.
-    ``preserve_luminosity`` normalizes the weights so that they sum to 1.
-    """
-
-    RGB: str = None
-    preserve_luminosity: bool = True
[docs] def apply(self, clip): - """Apply the effect to the clip.""" - if self.RGB is None: - self.RGB = [1, 1, 1] - - if self.RGB == "CRT_phosphor": - self.RGB = [0.2125, 0.7154, 0.0721] - - R, G, B = ( - 1.0 - * np.array(self.RGB) - / (sum(self.RGB) if self.preserve_luminosity else 1) - ) - - def filter(im): - im = R * im[:, :, 0] + G * im[:, :, 1] + B * im[:, :, 2] - return np.dstack(3 * [im]).astype("uint8") - - return clip.image_transform(filter)
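A usage sketch; the input path is an assumption:

```python
from moviepy import VideoFileClip
from moviepy.video.fx.BlackAndWhite import BlackAndWhite

clip = VideoFileClip("media/chaplin.mp4")

# CRT-phosphor luma weights, normalized to sum to 1.
bw = clip.with_effects([BlackAndWhite(RGB="CRT_phosphor")])
```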
-
\ No newline at end of file
diff --git a/_modules/moviepy/video/fx/Blink.html b/_modules/moviepy/video/fx/Blink.html
deleted file mode 100644
index 547bf27df..000000000
--- a/_modules/moviepy/video/fx/Blink.html
+++ /dev/null
@@ -1,471 +0,0 @@
Source code for moviepy.video.fx.Blink
-from dataclasses import dataclass
-
-from moviepy.Effect import Effect
-
-
-
\ No newline at end of file
diff --git a/_modules/moviepy/video/fx/Crop.html b/_modules/moviepy/video/fx/Crop.html
deleted file mode 100644
index ba370cd97..000000000
--- a/_modules/moviepy/video/fx/Crop.html
+++ /dev/null
@@ -1,524 +0,0 @@
Source code for moviepy.video.fx.Crop
-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
[docs]@dataclass
-class Crop(Effect):
-    """Effect to crop a clip, yielding a new clip in which just a rectangular
-    subregion of the original clip is conserved. ``x1,y1`` indicates the top
-    left corner and ``x2,y2`` the lower right corner of the cropped region.
-    All coordinates are in pixels. Float numbers are accepted.
-
-    To crop an arbitrary rectangle:
-
-    >>> Crop(x1=50, y1=60, x2=460, y2=275)
-
-    Only remove the part above y=30:
-
-    >>> Crop(y1=30)
-
-    Crop a rectangle that starts 10 pixels from the left and is 200px wide:
-
-    >>> Crop(x1=10, width=200)
-
-    Crop a rectangle centered in x,y=(300,400), width=50, height=150:
-
-    >>> Crop(x_center=300, y_center=400, width=50, height=150)
-
-    Any combination of the above should work, like for this rectangle
-    centered in x=300, with explicit y-boundaries:
-
-    >>> Crop(x_center=300, width=400, y1=100, y2=600)
-
-    """
-
-    x1: int = None
-    y1: int = None
-    x2: int = None
-    y2: int = None
-    width: int = None
-    height: int = None
-    x_center: int = None
-    y_center: int = None
[docs] def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - if self.width and self.x1 is not None: - self.x2 = self.x1 + self.width - elif self.width and self.x2 is not None: - self.x1 = self.x2 - self.width - - if self.height and self.y1 is not None: - self.y2 = self.y1 + self.height - elif self.height and self.y2 is not None: - self.y1 = self.y2 - self.height - - if self.x_center: - self.x1, self.x2 = ( - self.x_center - self.width / 2, - self.x_center + self.width / 2, - ) - - if self.y_center: - self.y1, self.y2 = ( - self.y_center - self.height / 2, - self.y_center + self.height / 2, - ) - - self.x1 = self.x1 or 0 - self.y1 = self.y1 or 0 - self.x2 = self.x2 or clip.size[0] - self.y2 = self.y2 or clip.size[1] - - return clip.image_transform( - lambda frame: frame[ - int(self.y1) : int(self.y2), int(self.x1) : int(self.x2) - ], - apply_to=["mask"], - )
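A usage sketch of applying the effect; the input path is an assumption:

```python
from moviepy import VideoFileClip
from moviepy.video.fx.Crop import Crop

clip = VideoFileClip("media/chaplin.mp4")

# Keep a 400x300 region centered on (320, 240).
cropped = clip.with_effects(
    [Crop(x_center=320, y_center=240, width=400, height=300)]
)
```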
-
\ No newline at end of file
diff --git a/_modules/moviepy/video/fx/CrossFadeIn.html b/_modules/moviepy/video/fx/CrossFadeIn.html
deleted file mode 100644
index 3876a30b6..000000000
--- a/_modules/moviepy/video/fx/CrossFadeIn.html
+++ /dev/null
@@ -1,471 +0,0 @@
Source code for moviepy.video.fx.CrossFadeIn
-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-from moviepy.video.fx.FadeIn import FadeIn
-
-
-
[docs]@dataclass -class CrossFadeIn(Effect): - """Makes the clip appear progressively, over ``duration`` seconds. - Only works when the clip is included in a CompositeVideoClip. - """ - - duration: float - -
[docs] def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - if clip.duration is None: - raise ValueError("Attribute 'duration' not set") - - if clip.mask is None: - clip = clip.with_add_mask() - - clip.mask.duration = clip.duration - clip.mask = clip.mask.with_effects([FadeIn(self.duration)]) - - return clip
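A usage sketch: cross-fading a clip in over a background; the file names are assumptions:

```python
from moviepy import VideoFileClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.fx.CrossFadeIn import CrossFadeIn

bg = VideoFileClip("media/background.mp4")
fg = (
    VideoFileClip("media/overlay.mp4")
    .with_start(2)
    .with_effects([CrossFadeIn(1)])
)

# fg appears progressively over bg, starting at t=2s, over one second.
comp = CompositeVideoClip([bg, fg])
```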
-
\ No newline at end of file
diff --git a/_modules/moviepy/video/fx/CrossFadeOut.html b/_modules/moviepy/video/fx/CrossFadeOut.html
deleted file mode 100644
index 229528483..000000000
--- a/_modules/moviepy/video/fx/CrossFadeOut.html
+++ /dev/null
@@ -1,471 +0,0 @@
Source code for moviepy.video.fx.CrossFadeOut
-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-from moviepy.video.fx.FadeOut import FadeOut
-
-
-
[docs]@dataclass -class CrossFadeOut(Effect): - """Makes the clip disappear progressively, over ``duration`` seconds. - Only works when the clip is included in a CompositeVideoClip. - """ - - duration: float - -
[docs] def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - if clip.duration is None: - raise ValueError("Attribute 'duration' not set") - - if clip.mask is None: - clip = clip.with_add_mask() - - clip.mask.duration = clip.duration - clip.mask = clip.mask.with_effects([FadeOut(self.duration)]) - - return clip
-
\ No newline at end of file
diff --git a/_modules/moviepy/video/fx/EvenSize.html b/_modules/moviepy/video/fx/EvenSize.html
deleted file mode 100644
index 537196d7e..000000000
--- a/_modules/moviepy/video/fx/EvenSize.html
+++ /dev/null
@@ -1,478 +0,0 @@
Source code for moviepy.video.fx.EvenSize
-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
[docs]@dataclass -class EvenSize(Effect): - """Crops the clip to make dimensions even.""" - -
[docs] def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - w, h = clip.size - w_even = w % 2 == 0 - h_even = h % 2 == 0 - if w_even and h_even: - return clip - - if not w_even and not h_even: - - def image_filter(a): - return a[:-1, :-1, :] - - elif h_even: - - def image_filter(a): - return a[:, :-1, :] - - else: - - def image_filter(a): - return a[:-1, :, :] - - return clip.image_transform(image_filter, apply_to=["mask"])
-
\ No newline at end of file
diff --git a/_modules/moviepy/video/fx/FadeIn.html b/_modules/moviepy/video/fx/FadeIn.html
deleted file mode 100644
index 3d6ace322..000000000
--- a/_modules/moviepy/video/fx/FadeIn.html
+++ /dev/null
@@ -1,480 +0,0 @@
Source code for moviepy.video.fx.FadeIn
-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
[docs]@dataclass
-class FadeIn(Effect):
-    """Makes the clip progressively appear from some color (black by default),
-    over ``duration`` seconds at the beginning of the clip. Can be used for
-    masks too, where the initial color must be a number between 0 and 1.
-
-    For cross-fading (progressive appearance or disappearance of a clip
-    over another clip), see ``CrossFadeIn``.
-    """
-
-    duration: float
-    initial_color: list = None
[docs] def apply(self, clip: Clip) -> Clip: - """Apply the effect to the clip.""" - if self.initial_color is None: - self.initial_color = 0 if clip.is_mask else [0, 0, 0] - - self.initial_color = np.array(self.initial_color) - - def filter(get_frame, t): - if t >= self.duration: - return get_frame(t) - else: - fading = 1.0 * t / self.duration - return fading * get_frame(t) + (1 - fading) * self.initial_color - - return clip.transform(filter)
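A usage sketch, assuming MoviePy v2's top-level imports (file name and durations are illustrative); ``FadeOut`` below follows the same pattern:

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> clip = clip.with_effects([vfx.FadeIn(1), vfx.FadeOut(1)])  # 1 s fades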
diff --git a/_modules/moviepy/video/fx/FadeOut.html b/_modules/moviepy/video/fx/FadeOut.html
deleted file mode 100644
index 3b00c0142..000000000
--- a/_modules/moviepy/video/fx/FadeOut.html
+++ /dev/null
@@ -1,483 +0,0 @@
Source code for moviepy.video.fx.FadeOut

-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class FadeOut(Effect):
-    """Makes the clip progressively fade to some color (black by default),
-    over ``duration`` seconds at the end of the clip. Can be used for masks too,
-    where the final color must be a number between 0 and 1.
-
-    For cross-fading (progressive appearance or disappearance of a clip over
-    another clip), see ``CrossFadeOut``.
-    """
-
-    duration: float
-    final_color: list = None
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        if clip.duration is None:
-            raise ValueError("Attribute 'duration' not set")
-
-        if self.final_color is None:
-            self.final_color = 0 if clip.is_mask else [0, 0, 0]
-
-        self.final_color = np.array(self.final_color)
-
-        def filter(get_frame, t):
-            if (clip.duration - t) >= self.duration:
-                return get_frame(t)
-            else:
-                fading = 1.0 * (clip.duration - t) / self.duration
-                return fading * get_frame(t) + (1 - fading) * self.final_color
-
-        return clip.transform(filter)
diff --git a/_modules/moviepy/video/fx/Freeze.html b/_modules/moviepy/video/fx/Freeze.html
deleted file mode 100644
index 114a6fcbc..000000000
--- a/_modules/moviepy/video/fx/Freeze.html
+++ /dev/null
@@ -1,487 +0,0 @@
Source code for moviepy.video.fx.Freeze

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-from moviepy.video.compositing.CompositeVideoClip import concatenate_videoclips
-
-
-
-@dataclass
-class Freeze(Effect):
-    """Momentarily freeze the clip at time t.
-
-    Set `t='end'` to freeze the clip at the end (actually it will freeze on the
-    frame at time ``clip.duration - padding_end - 1 / clip.fps``).
-    With ``freeze_duration`` you can specify the duration of the freeze.
-    With ``total_duration`` you can specify the total duration of
-    the clip and the freeze (i.e. the duration of the freeze is
-    automatically computed). One of them must be provided.
-    """
-
-    t: float = 0
-    freeze_duration: float = None
-    total_duration: float = None
-    padding_end: float = 0
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        if clip.duration is None:
-            raise ValueError("Attribute 'duration' not set")
-
-        if self.t == "end":
-            self.t = clip.duration - self.padding_end - 1 / clip.fps
-
-        if self.freeze_duration is None:
-            if self.total_duration is None:
-                raise ValueError(
-                    "You must provide either 'freeze_duration' or 'total_duration'"
-                )
-            self.freeze_duration = self.total_duration - clip.duration
-
-        before = [clip[: self.t]] if (self.t != 0) else []
-        freeze = [clip.to_ImageClip(self.t).with_duration(self.freeze_duration)]
-        after = [clip[self.t :]] if (self.t != clip.duration) else []
-        return concatenate_videoclips(before + freeze + after)
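A usage sketch (illustrative values; assumes the v2 ``vfx`` namespace):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> # hold the frame at t=2s for three extra seconds
    >>> frozen = clip.with_effects([vfx.Freeze(t=2, freeze_duration=3)])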
diff --git a/_modules/moviepy/video/fx/FreezeRegion.html b/_modules/moviepy/video/fx/FreezeRegion.html
deleted file mode 100644
index c459ff488..000000000
--- a/_modules/moviepy/video/fx/FreezeRegion.html
+++ /dev/null
@@ -1,512 +0,0 @@
Source code for moviepy.video.fx.FreezeRegion

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
-from moviepy.video.fx.Crop import Crop
-
-
-
-@dataclass
-class FreezeRegion(Effect):
-    """Freezes one region of the clip while the rest remains animated.
-
-    You can choose one of three methods by providing either `region`,
-    `outside_region`, or `mask`.
-
-    Parameters
-    ----------
-
-    t
-        Time at which to freeze the region.
-
-    region
-        A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
-        which will be frozen. You can provide outside_region or mask instead.
-
-    outside_region
-        A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
-        which will be the only non-frozen region.
-
-    mask
-        If not None, will overlay a frozen version of the clip on the current clip,
-        with the provided mask. In other words, the "visible" pixels in the mask
-        indicate the frozen region in the final picture.
-    """
-
-    t: float = 0
-    region: tuple = None
-    outside_region: tuple = None
-    mask: Clip = None
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        if self.region is not None:
-            x1, y1, _x2, _y2 = self.region
-            freeze = (
-                clip.with_effects([Crop(*self.region)])
-                .to_ImageClip(t=self.t)
-                .with_duration(clip.duration)
-                .with_position((x1, y1))
-            )
-            return CompositeVideoClip([clip, freeze])
-
-        elif self.outside_region is not None:
-            x1, y1, x2, y2 = self.outside_region
-            animated_region = clip.with_effects(
-                [Crop(*self.outside_region)]
-            ).with_position((x1, y1))
-            freeze = clip.to_ImageClip(t=self.t).with_duration(clip.duration)
-            return CompositeVideoClip([freeze, animated_region])
-
-        elif self.mask is not None:
-            freeze = (
-                clip.to_ImageClip(t=self.t)
-                .with_duration(clip.duration)
-                .with_mask(self.mask)
-            )
-            return CompositeVideoClip([clip, freeze])
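A usage sketch (region and file name are illustrative):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> # freeze the 100x100 top-left corner at t=1s; the rest keeps playing
    >>> fixed = clip.with_effects([vfx.FreezeRegion(t=1, region=(0, 0, 100, 100))])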
diff --git a/_modules/moviepy/video/fx/GammaCorrection.html b/_modules/moviepy/video/fx/GammaCorrection.html
deleted file mode 100644
index 14db9c208..000000000
--- a/_modules/moviepy/video/fx/GammaCorrection.html
+++ /dev/null
@@ -1,464 +0,0 @@
Source code for moviepy.video.fx.GammaCorrection

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class GammaCorrection(Effect):
-    """Gamma-correction of a video clip."""
-
-    gamma: float
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-
-        def filter(im):
-            corrected = 255 * (1.0 * im / 255) ** self.gamma
-            return corrected.astype("uint8")
-
-        return clip.image_transform(filter)
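A usage sketch (the gamma value is illustrative; per the filter above, values below 1 brighten and values above 1 darken):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> brighter = clip.with_effects([vfx.GammaCorrection(0.7)])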
diff --git a/_modules/moviepy/video/fx/HeadBlur.html b/_modules/moviepy/video/fx/HeadBlur.html
deleted file mode 100644
index bb8809ab8..000000000
--- a/_modules/moviepy/video/fx/HeadBlur.html
+++ /dev/null
@@ -1,489 +0,0 @@
Source code for moviepy.video.fx.HeadBlur

-from dataclasses import dataclass
-
-import numpy as np
-from PIL import Image, ImageDraw, ImageFilter
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class HeadBlur(Effect):
-    """Returns a filter that will blur a moving part (e.g. a head) of the frames.
-
-    The position of the blur at time t is defined by (fx(t), fy(t)), the radius
-    of the blurring by ``radius`` and the intensity of the blurring by ``intensity``.
-    """
-
-    fx: callable
-    fy: callable
-    radius: float
-    intensity: float = None
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        if self.intensity is None:
-            self.intensity = int(2 * self.radius / 3)
-
-        def filter(gf, t):
-            im = gf(t).copy()
-            h, w, d = im.shape
-            x, y = int(self.fx(t)), int(self.fy(t))
-            x1, x2 = max(0, x - self.radius), min(x + self.radius, w)
-            y1, y2 = max(0, y - self.radius), min(y + self.radius, h)
-
-            image = Image.fromarray(im)
-            mask = Image.new("RGB", image.size)
-            draw = ImageDraw.Draw(mask)
-            draw.ellipse([x1, y1, x2, y2], fill=(255, 255, 255))
-
-            blurred = image.filter(ImageFilter.GaussianBlur(radius=self.intensity))
-
-            res = np.where(np.array(mask) > 0, np.array(blurred), np.array(image))
-            return res
-
-        return clip.transform(filter)
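A usage sketch (trajectory and radius are illustrative):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> # blur a 30 px disc drifting right at 10 px/s from (100, 150)
    >>> blurred = clip.with_effects(
    ...     [vfx.HeadBlur(fx=lambda t: 100 + 10 * t, fy=lambda t: 150, radius=30)]
    ... )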
diff --git a/_modules/moviepy/video/fx/InvertColors.html b/_modules/moviepy/video/fx/InvertColors.html
deleted file mode 100644
index f341c0203..000000000
--- a/_modules/moviepy/video/fx/InvertColors.html
+++ /dev/null
@@ -1,462 +0,0 @@
Source code for moviepy.video.fx.InvertColors

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class InvertColors(Effect):
-    """Returns the color-inverted clip.
-
-    The values of all pixels are replaced with (255-v), or (1-v) for masks.
-    Black becomes white, green becomes purple, etc.
-    """
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        maxi = 1.0 if clip.is_mask else 255
-        return clip.image_transform(lambda f: maxi - f)
diff --git a/_modules/moviepy/video/fx/Loop.html b/_modules/moviepy/video/fx/Loop.html
deleted file mode 100644
index 95ad15e23..000000000
--- a/_modules/moviepy/video/fx/Loop.html
+++ /dev/null
@@ -1,487 +0,0 @@
Source code for moviepy.video.fx.Loop

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class Loop(Effect):
-    """Returns a clip that plays the current clip in an infinite loop.
-    Ideal for clips coming from GIFs.
-
-    Parameters
-    ----------
-
-    n
-        Number of times the clip should be played. If ``None``, the clip
-        will loop indefinitely (i.e. with no set duration).
-
-    duration
-        Total duration of the clip. Can be specified instead of n.
-    """
-
-    n: int = None
-    duration: float = None
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        if clip.duration is None:
-            raise ValueError("Attribute 'duration' not set")
-
-        previous_duration = clip.duration
-        clip = clip.time_transform(
-            lambda t: t % previous_duration, apply_to=["mask", "audio"]
-        )
-
-        if self.n:
-            self.duration = self.n * previous_duration
-
-        if self.duration:
-            clip = clip.with_duration(self.duration)
-
-        return clip
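A usage sketch showing both parameters (file name and values are illustrative):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("animation.gif")  # hypothetical input file
    >>> three_times = clip.with_effects([vfx.Loop(n=3)])
    >>> ten_seconds = clip.with_effects([vfx.Loop(duration=10)])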
diff --git a/_modules/moviepy/video/fx/LumContrast.html b/_modules/moviepy/video/fx/LumContrast.html
deleted file mode 100644
index d57e37662..000000000
--- a/_modules/moviepy/video/fx/LumContrast.html
+++ /dev/null
@@ -1,471 +0,0 @@
Source code for moviepy.video.fx.LumContrast

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class LumContrast(Effect):
-    """Luminosity-contrast correction of a clip."""
-
-    lum: float = 0
-    contrast: float = 0
-    contrast_threshold: float = 127
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-
-        def image_filter(im):
-            im = 1.0 * im  # float conversion
-            corrected = (
-                im + self.lum + self.contrast * (im - float(self.contrast_threshold))
-            )
-            corrected[corrected < 0] = 0
-            corrected[corrected > 255] = 255
-            return corrected.astype("uint8")
-
-        return clip.image_transform(image_filter)
diff --git a/_modules/moviepy/video/fx/MakeLoopable.html b/_modules/moviepy/video/fx/MakeLoopable.html
deleted file mode 100644
index 8e76f072e..000000000
--- a/_modules/moviepy/video/fx/MakeLoopable.html
+++ /dev/null
@@ -1,474 +0,0 @@
Source code for moviepy.video.fx.MakeLoopable

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
-from moviepy.video.fx.CrossFadeIn import CrossFadeIn
-
-
-
-@dataclass
-class MakeLoopable(Effect):
-    """Makes the clip fade in progressively at its own end, so that it can be
-    looped indefinitely.
-
-    Parameters
-    ----------
-
-    overlap_duration : float
-        Duration of the fade-in (in seconds).
-    """
-
-    overlap_duration: float
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        clip2 = clip.with_effects([CrossFadeIn(self.overlap_duration)]).with_start(
-            clip.duration - self.overlap_duration
-        )
-        return CompositeVideoClip([clip, clip2]).with_subclip(
-            self.overlap_duration, clip.duration
-        )
diff --git a/_modules/moviepy/video/fx/Margin.html b/_modules/moviepy/video/fx/Margin.html
deleted file mode 100644
index 20c8a38df..000000000
--- a/_modules/moviepy/video/fx/Margin.html
+++ /dev/null
@@ -1,534 +0,0 @@
Source code for moviepy.video.fx.Margin

-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-from moviepy.video.VideoClip import ImageClip
-
-
-
-@dataclass
-class Margin(Effect):
-    """Draws an external margin all around the frame.
-
-    Parameters
-    ----------
-
-    margin_size : int, optional
-        If not ``None``, the new clip has a margin of size ``margin_size``
-        in pixels on the left, right, top, and bottom.
-
-    left : int, optional
-        If ``margin_size=None``, margin size for the new clip in left direction.
-
-    right : int, optional
-        If ``margin_size=None``, margin size for the new clip in right direction.
-
-    top : int, optional
-        If ``margin_size=None``, margin size for the new clip in top direction.
-
-    bottom : int, optional
-        If ``margin_size=None``, margin size for the new clip in bottom direction.
-
-    color : tuple, optional
-        Color of the margin.
-
-    opacity : float, optional
-        Opacity of the margin. Setting this value to 0 yields transparent margins.
-    """
-
-    margin_size: int = None
-    left: int = 0
-    right: int = 0
-    top: int = 0
-    bottom: int = 0
-    color: tuple = (0, 0, 0)
-    opacity: float = 1.0
-
-    def add_margin(self, clip: Clip):
-        """Add margins to the clip."""
-        if (self.opacity != 1.0) and (clip.mask is None) and not (clip.is_mask):
-            clip = clip.with_add_mask()
-
-        if self.margin_size is not None:
-            self.left = self.right = self.top = self.bottom = self.margin_size
-
-        def make_bg(w, h):
-            new_w, new_h = w + self.left + self.right, h + self.top + self.bottom
-            if clip.is_mask:
-                shape = (new_h, new_w)
-                bg = np.tile(self.opacity, (new_h, new_w)).astype(float).reshape(shape)
-            else:
-                shape = (new_h, new_w, 3)
-                bg = np.tile(self.color, (new_h, new_w)).reshape(shape)
-            return bg
-
-        if isinstance(clip, ImageClip):
-            im = make_bg(clip.w, clip.h)
-            im[self.top : self.top + clip.h, self.left : self.left + clip.w] = clip.img
-            return clip.image_transform(lambda pic: im)
-
-        else:
-
-            def filter(get_frame, t):
-                pic = get_frame(t)
-                h, w = pic.shape[:2]
-                im = make_bg(w, h)
-                im[self.top : self.top + h, self.left : self.left + w] = pic
-                return im
-
-            return clip.transform(filter)
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        # We apply once on clip and once on mask if we have one
-        clip = self.add_margin(clip=clip)
-
-        if clip.mask:
-            clip.mask = self.add_margin(clip=clip.mask)
-
-        return clip
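A usage sketch (sizes and colors are illustrative):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> framed = clip.with_effects([vfx.Margin(margin_size=20, color=(255, 255, 255))])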
diff --git a/_modules/moviepy/video/fx/MaskColor.html b/_modules/moviepy/video/fx/MaskColor.html
deleted file mode 100644
index b48f47687..000000000
--- a/_modules/moviepy/video/fx/MaskColor.html
+++ /dev/null
@@ -1,489 +0,0 @@
Source code for moviepy.video.fx.MaskColor

-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class MaskColor(Effect):
-    """Returns a new clip with a mask for transparency where the original
-    clip is of the given color.
-
-    You can also have a "progressive" mask by specifying a non-null distance
-    threshold ``threshold``. In this case, if the distance between a pixel and
-    the given color is d, the transparency will be
-
-        d**stiffness / (threshold**stiffness + d**stiffness)
-
-    which is 1 when d >> threshold and 0 for d << threshold, the stiffness of
-    the effect being parametrized by ``stiffness``.
-    """
-
-    color: tuple = (0, 0, 0)
-    threshold: float = 0
-    stiffness: float = 1
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        color = np.array(self.color)
-
-        def hill(x):
-            if self.threshold:
-                return x**self.stiffness / (
-                    self.threshold**self.stiffness + x**self.stiffness
-                )
-            else:
-                return 1.0 * (x != 0)
-
-        def flim(im):
-            return hill(np.sqrt(((im - color) ** 2).sum(axis=2)))
-
-        mask = clip.image_transform(flim)
-        mask.is_mask = True
-        return clip.with_mask(mask)
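A green-screen style usage sketch (color and threshold are illustrative):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("greenscreen.mp4")  # hypothetical input file
    >>> keyed = clip.with_effects(
    ...     [vfx.MaskColor(color=(0, 255, 0), threshold=100, stiffness=5)]
    ... )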
diff --git a/_modules/moviepy/video/fx/MasksAnd.html b/_modules/moviepy/video/fx/MasksAnd.html
deleted file mode 100644
index 621a1a0ab..000000000
--- a/_modules/moviepy/video/fx/MasksAnd.html
+++ /dev/null
@@ -1,494 +0,0 @@
Source code for moviepy.video.fx.MasksAnd

-from dataclasses import dataclass
-from typing import Union
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-from moviepy.video.VideoClip import ImageClip
-
-
-
-@dataclass
-class MasksAnd(Effect):
-    """Returns the logical 'and' (minimum pixel color values) between two masks.
-
-    The result has the duration of the clip to which it has been applied, if any.
-
-    Parameters
-    ----------
-
-    other_clip : ImageClip or np.ndarray
-        Clip used to mask the original clip.
-
-    Examples
-    --------
-
-    >>> clip = ColorClip(color=(255, 0, 0), size=(1, 1))  # red
-    >>> mask = ColorClip(color=(0, 255, 0), size=(1, 1))  # green
-    >>> masked_clip = clip.with_effects([vfx.MasksAnd(mask)])  # black
-    >>> masked_clip.get_frame(0)
-    [[[0 0 0]]]
-    """
-
-    other_clip: Union[Clip, np.ndarray]
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        # to ensure that 'and' of two ImageClips will be an ImageClip
-        if isinstance(self.other_clip, ImageClip):
-            self.other_clip = self.other_clip.img
-
-        if isinstance(self.other_clip, np.ndarray):
-            return clip.image_transform(
-                lambda frame: np.minimum(frame, self.other_clip)
-            )
-        else:
-            return clip.transform(
-                lambda get_frame, t: np.minimum(
-                    get_frame(t), self.other_clip.get_frame(t)
-                )
-            )
diff --git a/_modules/moviepy/video/fx/MasksOr.html b/_modules/moviepy/video/fx/MasksOr.html
deleted file mode 100644
index 54c95179e..000000000
--- a/_modules/moviepy/video/fx/MasksOr.html
+++ /dev/null
@@ -1,494 +0,0 @@
Source code for moviepy.video.fx.MasksOr

-from dataclasses import dataclass
-from typing import Union
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-from moviepy.video.VideoClip import ImageClip
-
-
-
-@dataclass
-class MasksOr(Effect):
-    """Returns the logical 'or' (maximum pixel color values) between two masks.
-
-    The result has the duration of the clip to which it has been applied, if any.
-
-    Parameters
-    ----------
-
-    other_clip : ImageClip or np.ndarray
-        Clip used to mask the original clip.
-
-    Examples
-    --------
-
-    >>> clip = ColorClip(color=(255, 0, 0), size=(1, 1))  # red
-    >>> mask = ColorClip(color=(0, 255, 0), size=(1, 1))  # green
-    >>> masked_clip = clip.with_effects([vfx.MasksOr(mask)])  # yellow
-    >>> masked_clip.get_frame(0)
-    [[[255 255 0]]]
-    """
-
-    other_clip: Union[Clip, np.ndarray]
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        # to ensure that 'or' of two ImageClips will be an ImageClip
-        if isinstance(self.other_clip, ImageClip):
-            self.other_clip = self.other_clip.img
-
-        if isinstance(self.other_clip, np.ndarray):
-            return clip.image_transform(
-                lambda frame: np.maximum(frame, self.other_clip)
-            )
-        else:
-            return clip.transform(
-                lambda get_frame, t: np.maximum(
-                    get_frame(t), self.other_clip.get_frame(t)
-                )
-            )
diff --git a/_modules/moviepy/video/fx/MirrorX.html b/_modules/moviepy/video/fx/MirrorX.html
deleted file mode 100644
index ee2332edf..000000000
--- a/_modules/moviepy/video/fx/MirrorX.html
+++ /dev/null
@@ -1,460 +0,0 @@
Source code for moviepy.video.fx.MirrorX

-from dataclasses import dataclass
-from typing import List, Union
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class MirrorX(Effect):
-    """Flips the clip horizontally (and its mask too, by default)."""
-
-    apply_to: Union[List, str] = "mask"
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        return clip.image_transform(lambda img: img[:, ::-1], apply_to=self.apply_to)
diff --git a/_modules/moviepy/video/fx/MirrorY.html b/_modules/moviepy/video/fx/MirrorY.html
deleted file mode 100644
index 33941e3af..000000000
--- a/_modules/moviepy/video/fx/MirrorY.html
+++ /dev/null
@@ -1,460 +0,0 @@
Source code for moviepy.video.fx.MirrorY

-from dataclasses import dataclass
-from typing import List, Union
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class MirrorY(Effect):
-    """Flips the clip vertically (and its mask too, by default)."""
-
-    apply_to: Union[List, str] = "mask"
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        return clip.image_transform(lambda img: img[::-1], apply_to=self.apply_to)
diff --git a/_modules/moviepy/video/fx/MultiplyColor.html b/_modules/moviepy/video/fx/MultiplyColor.html
deleted file mode 100644
index 1ae1b072a..000000000
--- a/_modules/moviepy/video/fx/MultiplyColor.html
+++ /dev/null
@@ -1,467 +0,0 @@
Source code for moviepy.video.fx.MultiplyColor

-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class MultiplyColor(Effect):
-    """Multiplies the clip's colors by the given factor. Can be used
-    to decrease or increase the clip's brightness.
-    """
-
-    factor: float
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        return clip.image_transform(
-            lambda frame: np.minimum(255, (self.factor * frame)).astype("uint8")
-        )
diff --git a/_modules/moviepy/video/fx/MultiplySpeed.html b/_modules/moviepy/video/fx/MultiplySpeed.html
deleted file mode 100644
index c67b68318..000000000
--- a/_modules/moviepy/video/fx/MultiplySpeed.html
+++ /dev/null
@@ -1,475 +0,0 @@
Source code for moviepy.video.fx.MultiplySpeed

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class MultiplySpeed(Effect):
-    """Returns a clip playing the current clip but at a speed multiplied by ``factor``.
-
-    Instead of factor one can indicate the desired ``final_duration`` of the clip, and
-    the factor will be automatically computed. The same effect is applied to the clip's
-    audio and mask if any.
-    """
-
-    factor: float = None
-    final_duration: float = None
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        if self.final_duration:
-            self.factor = 1.0 * clip.duration / self.final_duration
-
-        new_clip = clip.time_transform(
-            lambda t: self.factor * t, apply_to=["mask", "audio"]
-        )
-
-        if clip.duration is not None:
-            new_clip = new_clip.with_duration(1.0 * clip.duration / self.factor)
-
-        return new_clip
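A usage sketch showing both ways to set the speed (values are illustrative):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> twice_as_fast = clip.with_effects([vfx.MultiplySpeed(factor=2)])
    >>> exactly_ten_s = clip.with_effects([vfx.MultiplySpeed(final_duration=10)])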
diff --git a/_modules/moviepy/video/fx/Painting.html b/_modules/moviepy/video/fx/Painting.html
deleted file mode 100644
index 64e6a5a8f..000000000
--- a/_modules/moviepy/video/fx/Painting.html
+++ /dev/null
@@ -1,507 +0,0 @@
Source code for moviepy.video.fx.Painting

-from dataclasses import dataclass
-
-import numpy as np
-from PIL import Image, ImageFilter
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class Painting(Effect):
-    """Transforms any photo into some kind of painting.
-
-    ``saturation`` tells at which point the colors of the result should be
-    flashy. ``black`` gives the amount of black lines wanted.
-    """
-
-    saturation: float = 1.4
-    black: float = 0.006
-
-    def to_painting(self, np_image, saturation=1.4, black=0.006):
-        """Transforms any photo into some kind of painting.
-
-        ``saturation`` tells at which point the colors of the result should be
-        flashy. ``black`` gives the amount of black lines wanted.
-
-        np_image : a numpy image
-        """
-        image = Image.fromarray(np_image)
-        image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)
-
-        # Convert the image to grayscale
-        grayscale_image = image.convert("L")
-
-        # Find the image edges
-        edges_image = grayscale_image.filter(ImageFilter.FIND_EDGES)
-
-        # Convert the edges image to a numpy array
-        edges = np.array(edges_image)
-
-        # Create the darkening effect
-        darkening = black * (255 * np.dstack(3 * [edges]))
-
-        # Apply the painting effect
-        painting = saturation * np.array(image) - darkening
-
-        # Clip the pixel values to the valid range of 0-255
-        painting = np.maximum(0, np.minimum(255, painting))
-
-        # Convert the pixel values to unsigned 8-bit integers
-        painting = painting.astype("uint8")
-
-        return painting
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        return clip.image_transform(
-            lambda im: self.to_painting(im, self.saturation, self.black)
-        )
diff --git a/_modules/moviepy/video/fx/Resize.html b/_modules/moviepy/video/fx/Resize.html
deleted file mode 100644
index 70b6a9725..000000000
--- a/_modules/moviepy/video/fx/Resize.html
+++ /dev/null
@@ -1,600 +0,0 @@
Source code for moviepy.video.fx.Resize

-import numbers
-from dataclasses import dataclass
-from typing import Union
-
-import numpy as np
-from PIL import Image
-
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class Resize(Effect):
-    """Effect returning a video clip that is a resized version of the clip.
-
-    Parameters
-    ----------
-
-    new_size : tuple or float or function, optional
-        Can be either:
-
-        - ``(width, height)`` in pixels.
-        - A float representing a scaling factor, like ``0.5``.
-        - A function of time returning one of these.
-
-    height : int, optional
-        Height of the new clip in pixels. The width is then computed so
-        that the width/height ratio is conserved.
-
-    width : int, optional
-        Width of the new clip in pixels. The height is then computed so
-        that the width/height ratio is conserved.
-
-    Examples
-    --------
-
-    >>> myClip.with_effects([vfx.Resize((460,720))]) # New resolution: (460,720)
-    >>> myClip.with_effects([vfx.Resize(0.6)]) # width and height multiplied by 0.6
-    >>> myClip.with_effects([vfx.Resize(width=800)]) # height computed automatically.
-    >>> myClip.with_effects([vfx.Resize(lambda t : 1+0.02*t)]) # slow clip swelling
-    """
-
-    new_size: Union[tuple, float, callable] = None
-    height: int = None
-    width: int = None
-    apply_to_mask: bool = True
-
-    def resizer(self, pic, new_size):
-        """Resize the image using PIL."""
-        new_size = list(map(int, new_size))
-        pil_img = Image.fromarray(pic)
-        resized_pil = pil_img.resize(new_size, Image.Resampling.LANCZOS)
-        return np.array(resized_pil)
-
-    def apply(self, clip):
-        """Apply the effect to the clip."""
-        w, h = clip.size
-
-        if self.new_size is not None:
-
-            def translate_new_size(new_size_):
-                """Returns a [w, h] pair from `new_size_`. If `new_size_` is a
-                scalar, then work out the correct pair using the clip's size.
-                Otherwise just return `new_size_`.
-                """
-                if isinstance(new_size_, numbers.Number):
-                    return [new_size_ * w, new_size_ * h]
-                else:
-                    return new_size_
-
-            if hasattr(self.new_size, "__call__"):
-                # The resizing is a function of time
-
-                def get_new_size(t):
-                    return translate_new_size(self.new_size(t))
-
-                if clip.is_mask:
-
-                    def filter(get_frame, t):
-                        return (
-                            self.resizer(
-                                (255 * get_frame(t)).astype("uint8"), get_new_size(t)
-                            )
-                            / 255.0
-                        )
-
-                else:
-
-                    def filter(get_frame, t):
-                        return self.resizer(
-                            get_frame(t).astype("uint8"), get_new_size(t)
-                        )
-
-                newclip = clip.transform(
-                    filter,
-                    keep_duration=True,
-                    apply_to=(["mask"] if self.apply_to_mask else []),
-                )
-                if self.apply_to_mask and clip.mask is not None:
-                    newclip.mask = clip.mask.with_effects(
-                        [Resize(self.new_size, apply_to_mask=False)]
-                    )
-
-                return newclip
-
-            else:
-                self.new_size = translate_new_size(self.new_size)
-
-        elif self.height is not None:
-            if hasattr(self.height, "__call__"):
-
-                def func(t):
-                    return 1.0 * int(self.height(t)) / h
-
-                return clip.with_effects([Resize(func)])
-
-            else:
-                self.new_size = [w * self.height / h, self.height]
-
-        elif self.width is not None:
-            if hasattr(self.width, "__call__"):
-
-                def func(t):
-                    return 1.0 * self.width(t) / w
-
-                return clip.with_effects([Resize(func)])
-
-            else:
-                self.new_size = [self.width, h * self.width / w]
-        else:
-            raise ValueError(
-                "You must provide either 'new_size' or 'height' or 'width'"
-            )
-
-        # From here, the resizing is constant (not a function of time), size=newsize
-
-        if clip.is_mask:
-
-            def image_filter(pic):
-                return (
-                    1.0
-                    * self.resizer((255 * pic).astype("uint8"), self.new_size)
-                    / 255.0
-                )
-
-        else:
-
-            def image_filter(pic):
-                return self.resizer(pic.astype("uint8"), self.new_size)
-
-        new_clip = clip.image_transform(image_filter)
-
-        if self.apply_to_mask and clip.mask is not None:
-            new_clip.mask = clip.mask.with_effects(
-                [Resize(self.new_size, apply_to_mask=False)]
-            )
-
-        return new_clip
diff --git a/_modules/moviepy/video/fx/Rotate.html b/_modules/moviepy/video/fx/Rotate.html
deleted file mode 100644
index 8a59117d0..000000000
--- a/_modules/moviepy/video/fx/Rotate.html
+++ /dev/null
@@ -1,572 +0,0 @@
Source code for moviepy.video.fx.Rotate

-import math
-from dataclasses import dataclass
-
-import numpy as np
-from PIL import Image
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class Rotate(Effect):
-    """Rotates the specified clip by ``angle`` degrees (or radians) anticlockwise.
-
-    If the angle is not a multiple of 90 (degrees) or ``center``, ``translate``,
-    and ``bg_color`` are not ``None``, there will be black borders.
-    You can make them transparent with:
-
-    >>> new_clip = clip.with_add_mask().with_effects([vfx.Rotate(72)])
-
-    Parameters
-    ----------
-
-    clip : VideoClip
-        A video clip.
-
-    angle : float
-        Either a value or a function angle(t) representing the angle of rotation.
-
-    unit : str, optional
-        Unit of parameter `angle` (either "deg" for degrees or "rad" for radians).
-
-    resample : str, optional
-        An optional resampling filter. One of "nearest", "bilinear", or "bicubic".
-
-    expand : bool, optional
-        If true, expands the output image to make it large enough to hold the
-        entire rotated image. If false or omitted, make the output image the same
-        size as the input image.
-
-    translate : tuple, optional
-        An optional post-rotate translation (a 2-tuple).
-
-    center : tuple, optional
-        Optional center of rotation (a 2-tuple). Origin is the upper left corner.
-
-    bg_color : tuple, optional
-        An optional color for area outside the rotated image. Only has effect if
-        ``expand`` is true.
-    """
-
-    angle: float
-    unit: str = "deg"
-    resample: str = "bicubic"
-    expand: bool = True
-    center: tuple = None
-    translate: tuple = None
-    bg_color: tuple = None
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        try:
-            resample = {
-                "bilinear": Image.BILINEAR,
-                "nearest": Image.NEAREST,
-                "bicubic": Image.BICUBIC,
-            }[self.resample]
-        except KeyError:
-            raise ValueError(
-                "'resample' argument must be either 'bilinear', 'nearest' or 'bicubic'"
-            )
-
-        if hasattr(self.angle, "__call__"):
-            get_angle = self.angle
-        else:
-            get_angle = lambda t: self.angle
-
-        def filter(get_frame, t):
-            angle = get_angle(t)
-            im = get_frame(t)
-
-            if self.unit == "rad":
-                angle = math.degrees(angle)
-
-            angle %= 360
-            if not self.center and not self.translate and not self.bg_color:
-                if (angle == 0) and self.expand:
-                    return im
-                if (angle == 90) and self.expand:
-                    transpose = [1, 0] if len(im.shape) == 2 else [1, 0, 2]
-                    return np.transpose(im, axes=transpose)[::-1]
-                elif (angle == 270) and self.expand:
-                    transpose = [1, 0] if len(im.shape) == 2 else [1, 0, 2]
-                    return np.transpose(im, axes=transpose)[:, ::-1]
-                elif (angle == 180) and self.expand:
-                    return im[::-1, ::-1]
-
-            pillow_kwargs = {}
-
-            if self.bg_color is not None:
-                pillow_kwargs["fillcolor"] = self.bg_color
-
-            if self.center is not None:
-                pillow_kwargs["center"] = self.center
-
-            if self.translate is not None:
-                pillow_kwargs["translate"] = self.translate
-
-            # PIL expects uint8 type data. However a mask image has values in the
-            # range [0, 1] and is of float type. To handle this we scale it up by
-            # a factor 'a' for use with PIL and then back again by 'a' afterwards.
-            if im.dtype == "float64":
-                # this is a mask image
-                a = 255.0
-            else:
-                a = 1
-
-            # call PIL.rotate
-            return (
-                np.array(
-                    Image.fromarray(np.array(a * im).astype(np.uint8)).rotate(
-                        angle, expand=self.expand, resample=resample, **pillow_kwargs
-                    )
-                )
-                / a
-            )
-
-        return clip.transform(filter, apply_to=["mask"])
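A usage sketch (the angle is illustrative):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> tilted = clip.with_effects([vfx.Rotate(45)])  # 45 degrees anticlockwise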
diff --git a/_modules/moviepy/video/fx/Scroll.html b/_modules/moviepy/video/fx/Scroll.html
deleted file mode 100644
index b8a65342f..000000000
--- a/_modules/moviepy/video/fx/Scroll.html
+++ /dev/null
@@ -1,502 +0,0 @@
Source code for moviepy.video.fx.Scroll

-from moviepy.Effect import Effect
-
-
-
-class Scroll(Effect):
-    """Effect that scrolls a clip horizontally or vertically, e.g. to make
-    end credits.
-
-    Parameters
-    ----------
-
-    w, h
-        The width and height of the final clip. Default to clip.w and clip.h.
-
-    x_speed, y_speed
-        The speed of the scroll in the x and y directions.
-
-    x_start, y_start
-        The starting position of the scroll in the x and y directions.
-
-    apply_to
-        Whether to apply the effect to the mask too.
-    """
-
-    def __init__(
-        self,
-        w=None,
-        h=None,
-        x_speed=0,
-        y_speed=0,
-        x_start=0,
-        y_start=0,
-        apply_to="mask",
-    ):
-        self.w = w
-        self.h = h
-        self.x_speed = x_speed
-        self.y_speed = y_speed
-        self.x_start = x_start
-        self.y_start = y_start
-        self.apply_to = apply_to
-
-    def apply(self, clip):
-        """Apply the effect to the clip."""
-        if self.h is None:
-            self.h = clip.h
-
-        if self.w is None:
-            self.w = clip.w
-
-        x_max = self.w - 1
-        y_max = self.h - 1
-
-        def filter(get_frame, t):
-            x = int(max(0, min(x_max, self.x_start + round(self.x_speed * t))))
-            y = int(max(0, min(y_max, self.y_start + round(self.y_speed * t))))
-            return get_frame(t)[y : y + self.h, x : x + self.w]
-
-        return clip.transform(filter, apply_to=self.apply_to)
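A usage sketch (window height and speed are illustrative):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("credits.mp4")  # hypothetical input file
    >>> # crop to a full-width, 400 px tall window that slides down at 30 px/s
    >>> scrolled = clip.with_effects([vfx.Scroll(h=400, y_speed=30)])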
diff --git a/_modules/moviepy/video/fx/SlideIn.html b/_modules/moviepy/video/fx/SlideIn.html
deleted file mode 100644
index c4d151502..000000000
--- a/_modules/moviepy/video/fx/SlideIn.html
+++ /dev/null
@@ -1,502 +0,0 @@
Source code for moviepy.video.fx.SlideIn

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class SlideIn(Effect):
-    """Makes the clip arrive from one side of the screen.
-
-    Only works when the clip is included in a CompositeVideoClip,
-    and if the clip has the same size as the whole composition.
-
-    Parameters
-    ----------
-
-    clip : moviepy.Clip.Clip
-        A video clip.
-
-    duration : float
-        Time taken for the clip to be fully visible.
-
-    side : str
-        Side of the screen where the clip comes from. One of
-        'top', 'bottom', 'left' or 'right'.
-
-    Examples
-    --------
-
-    >>> from moviepy import *
-    >>>
-    >>> clips = [... make a list of clips]
-    >>> slided_clips = [
-    ...     CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "left")])])
-    ...     for clip in clips
-    ... ]
-    >>> final_clip = concatenate_videoclips(slided_clips, padding=-1)
-    >>>
-    >>> clip = ColorClip(
-    ...     color=(255, 0, 0), duration=1, size=(300, 300)
-    ... ).with_fps(60)
-    >>> final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "right")])])
-    """
-
-    duration: float
-    side: str
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        w, h = clip.size
-        pos_dict = {
-            "left": lambda t: (min(0, w * (t / self.duration - 1)), "center"),
-            "right": lambda t: (max(0, w * (1 - t / self.duration)), "center"),
-            "top": lambda t: ("center", min(0, h * (t / self.duration - 1))),
-            "bottom": lambda t: ("center", max(0, h * (1 - t / self.duration))),
-        }
-
-        return clip.with_position(pos_dict[self.side])
diff --git a/_modules/moviepy/video/fx/SlideOut.html b/_modules/moviepy/video/fx/SlideOut.html
deleted file mode 100644
index 0e74ca435..000000000
--- a/_modules/moviepy/video/fx/SlideOut.html
+++ /dev/null
@@ -1,506 +0,0 @@
Source code for moviepy.video.fx.SlideOut

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class SlideOut(Effect):
-    """Makes the clip go away through one side of the screen.
-
-    Only works when the clip is included in a CompositeVideoClip,
-    and if the clip has the same size as the whole composition.
-
-    Parameters
-    ----------
-
-    clip : moviepy.Clip.Clip
-        A video clip.
-
-    duration : float
-        Time taken for the clip to fully disappear.
-
-    side : str
-        Side of the screen where the clip goes. One of
-        'top', 'bottom', 'left' or 'right'.
-
-    Examples
-    --------
-
-    >>> from moviepy import *
-    >>>
-    >>> clips = [... make a list of clips]
-    >>> slided_clips = [
-    ...     CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "left")])])
-    ...     for clip in clips
-    ... ]
-    >>> final_clip = concatenate_videoclips(slided_clips, padding=-1)
-    >>>
-    >>> clip = ColorClip(
-    ...     color=(255, 0, 0), duration=1, size=(300, 300)
-    ... ).with_fps(60)
-    >>> final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "right")])])
-    """
-
-    duration: float
-    side: str
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        if clip.duration is None:
-            raise ValueError("Attribute 'duration' not set")
-
-        w, h = clip.size
-        ts = clip.duration - self.duration  # start time of the effect.
-        pos_dict = {
-            "left": lambda t: (min(0, w * (-(t - ts) / self.duration)), "center"),
-            "right": lambda t: (max(0, w * ((t - ts) / self.duration)), "center"),
-            "top": lambda t: ("center", min(0, h * (-(t - ts) / self.duration))),
-            "bottom": lambda t: ("center", max(0, h * ((t - ts) / self.duration))),
-        }
-
-        return clip.with_position(pos_dict[self.side])
diff --git a/_modules/moviepy/video/fx/SuperSample.html b/_modules/moviepy/video/fx/SuperSample.html
deleted file mode 100644
index da27e918f..000000000
--- a/_modules/moviepy/video/fx/SuperSample.html
+++ /dev/null
@@ -1,473 +0,0 @@
Source code for moviepy.video.fx.SuperSample

-from dataclasses import dataclass
-
-import numpy as np
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class SuperSample(Effect):
-    """Replaces each frame at time t by the mean of `n_frames` equally spaced frames
-    taken in the interval [t-d, t+d]. This results in motion blur.
-    """
-
-    d: float
-    n_frames: int
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-
-        def filter(get_frame, t):
-            timings = np.linspace(t - self.d, t + self.d, self.n_frames)
-            frame_average = np.mean(
-                1.0 * np.array([get_frame(t_) for t_ in timings], dtype="uint16"),
-                axis=0,
-            )
-            return frame_average.astype("uint8")
-
-        return clip.transform(filter)
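A usage sketch (illustrative values; this averages 5 frames within ±0.05 s of each t):

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> motion_blurred = clip.with_effects([vfx.SuperSample(d=0.05, n_frames=5)])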
diff --git a/_modules/moviepy/video/fx/TimeMirror.html b/_modules/moviepy/video/fx/TimeMirror.html
deleted file mode 100644
index daeef27a0..000000000
--- a/_modules/moviepy/video/fx/TimeMirror.html
+++ /dev/null
@@ -1,464 +0,0 @@
Source code for moviepy.video.fx.TimeMirror

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class TimeMirror(Effect):
-    """Returns a clip that plays the current clip backwards.
-
-    The clip must have its ``duration`` attribute set.
-    The same effect is applied to the clip's audio and mask if any.
-    """
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        if clip.duration is None:
-            raise ValueError("Attribute 'duration' not set")
-
-        return clip[::-1]
diff --git a/_modules/moviepy/video/fx/TimeSymmetrize.html b/_modules/moviepy/video/fx/TimeSymmetrize.html
deleted file mode 100644
index 5a6134c1c..000000000
--- a/_modules/moviepy/video/fx/TimeSymmetrize.html
+++ /dev/null
@@ -1,466 +0,0 @@
Source code for moviepy.video.fx.TimeSymmetrize

-from dataclasses import dataclass
-
-from moviepy.Clip import Clip
-from moviepy.Effect import Effect
-
-
-
-@dataclass
-class TimeSymmetrize(Effect):
-    """Returns a clip that plays the current clip once forwards and
-    then once backwards. This is very practical for making videos that
-    loop well, e.g. to create animated GIFs.
-    This effect is automatically applied to the clip's mask and audio
-    if they exist.
-    """
-
-    def apply(self, clip: Clip) -> Clip:
-        """Apply the effect to the clip."""
-        if clip.duration is None:
-            raise ValueError("Attribute 'duration' not set")
-
-        return clip + clip[::-1]
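A usage sketch:

    >>> from moviepy import VideoFileClip, vfx
    >>> clip = VideoFileClip("video.mp4")  # hypothetical input file
    >>> palindrome = clip.with_effects([vfx.TimeSymmetrize()])  # forwards, then backwards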
diff --git a/_modules/moviepy/video/io/ImageSequenceClip.html b/_modules/moviepy/video/io/ImageSequenceClip.html
deleted file mode 100644
index 0dbbeb4ae..000000000
--- a/_modules/moviepy/video/io/ImageSequenceClip.html
+++ /dev/null
@@ -1,611 +0,0 @@
Source code for moviepy.video.io.ImageSequenceClip

-"""Implements ImageSequenceClip, a class to create a video clip from a set
-of image files.
-"""
-
-import os
-
-import numpy as np
-from imageio.v2 import imread
-
-from moviepy.video.VideoClip import VideoClip
-
-
-
-class ImageSequenceClip(VideoClip):
-    """A VideoClip made from a series of images.
-
-    Parameters
-    ----------
-
-    sequence
-      Can be one of these:
-
-      - The name of a folder (containing only pictures). The pictures
-        will be considered in alphanumerical order.
-      - A list of names of image files. In this case you can choose to
-        load the pictures in memory with ``load_images=True`` (see below).
-      - A list of Numpy arrays representing images. In this last case,
-        masks are not supported currently.
-
-    fps
-      Number of picture frames to read per second. Alternatively, you can
-      provide the duration of each image with ``durations`` (see below).
-
-    durations
-      List of the duration of each picture.
-
-    with_mask
-      Should the alpha layer of PNG images be considered as a mask?
-
-    is_mask
-      Will this sequence of pictures be used as an animated mask?
-
-    load_images
-      Specify that all images should be loaded into RAM. This is only
-      useful if you have a small number of images that will be used
-      more than once.
-    """
-
-    def __init__(
-        self,
-        sequence,
-        fps=None,
-        durations=None,
-        with_mask=True,
-        is_mask=False,
-        load_images=False,
-    ):
-        # CODE WRITTEN AS IT CAME, MAY BE IMPROVED IN THE FUTURE
-
-        if (fps is None) and (durations is None):
-            raise ValueError("Please provide either 'fps' or 'durations'.")
-        VideoClip.__init__(self, is_mask=is_mask)
-
-        # Parse the data
-
-        fromfiles = True
-
-        if isinstance(sequence, list):
-            if isinstance(sequence[0], str):
-                if load_images:
-                    sequence = [imread(file) for file in sequence]
-                    fromfiles = False
-                else:
-                    fromfiles = True
-            else:
-                # sequence is already a list of numpy arrays
-                fromfiles = False
-        else:
-            # sequence is a folder name, make it a list of files:
-            fromfiles = True
-            sequence = sorted(
-                [os.path.join(sequence, file) for file in os.listdir(sequence)]
-            )
-
-        # check that all the images are of the same size
-        if isinstance(sequence[0], str):
-            size = imread(sequence[0]).shape
-        else:
-            size = sequence[0].shape
-
-        for image in sequence:
-            image1 = image
-            if isinstance(image, str):
-                image1 = imread(image)
-            if size != image1.shape:
-                raise Exception(
-                    "MoviePy: ImageSequenceClip requires all images to be the same size"
-                )
-
-        self.fps = fps
-        if fps is not None:
-            durations = [1.0 / fps for image in sequence]
-            self.images_starts = [
-                1.0 * i / fps - np.finfo(np.float32).eps for i in range(len(sequence))
-            ]
-        else:
-            self.images_starts = [0] + list(np.cumsum(durations))
-        self.durations = durations
-        self.duration = sum(durations)
-        self.end = self.duration
-        self.sequence = sequence
-
-        if fps is None:
-            # Average frame rate: number of images over the total duration
-            self.fps = len(sequence) / self.duration
-
-        def find_image_index(t):
-            return max(
-                [i for i in range(len(self.sequence)) if self.images_starts[i] <= t]
-            )
-
-        if fromfiles:
-            self.last_index = None
-            self.last_image = None
-
-            def make_frame(t):
-                index = find_image_index(t)
-
-                if index != self.last_index:
-                    self.last_image = imread(self.sequence[index])[:, :, :3]
-                    self.last_index = index
-
-                return self.last_image
-
-            if with_mask and (imread(self.sequence[0]).shape[2] == 4):
-                self.mask = VideoClip(is_mask=True)
-                self.mask.last_index = None
-                self.mask.last_image = None
-
-                def mask_make_frame(t):
-                    index = find_image_index(t)
-                    if index != self.mask.last_index:
-                        frame = imread(self.sequence[index])[:, :, 3]
-                        self.mask.last_image = frame.astype(float) / 255
-                        self.mask.last_index = index
-
-                    return self.mask.last_image
-
-                self.mask.make_frame = mask_make_frame
-                self.mask.size = mask_make_frame(0).shape[:2][::-1]
-
-        else:
-
-            def make_frame(t):
-                index = find_image_index(t)
-                return self.sequence[index][:, :, :3]
-
-            if with_mask and (self.sequence[0].shape[2] == 4):
-                self.mask = VideoClip(is_mask=True)
-
-                def mask_make_frame(t):
-                    index = find_image_index(t)
-                    return 1.0 * self.sequence[index][:, :, 3] / 255
-
-                self.mask.make_frame = mask_make_frame
-                self.mask.size = mask_make_frame(0).shape[:2][::-1]
-
-        self.make_frame = make_frame
-        self.size = make_frame(0).shape[:2][::-1]
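For context, a minimal usage sketch of the class above; the folder name, file names, and timing values are hypothetical:

from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

# A folder of same-sized images, shown at 4 images per second.
clip = ImageSequenceClip("frames/", fps=4)

# Or give each image its own duration; the clip duration is their sum (3.5s here).
clip = ImageSequenceClip(["a.png", "b.png", "c.png"], durations=[1, 2, 0.5])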
\ No newline at end of file
diff --git a/_modules/moviepy/video/io/VideoFileClip.html b/_modules/moviepy/video/io/VideoFileClip.html
deleted file mode 100644
index 99ce19395..000000000
--- a/_modules/moviepy/video/io/VideoFileClip.html
+++ /dev/null
@@ -1,618 +0,0 @@
-moviepy.video.io.VideoFileClip — MoviePy documentation
Source code for moviepy.video.io.VideoFileClip

-"""Implements VideoFileClip, a class for video clips creation using video files."""
-
-from moviepy.audio.io.AudioFileClip import AudioFileClip
-from moviepy.decorators import convert_path_to_string
-from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
-from moviepy.video.VideoClip import VideoClip
-
-
-
-class VideoFileClip(VideoClip):
-    """
-    A video clip originating from a movie file. For instance: ::
-
-        >>> clip = VideoFileClip("myHolidays.mp4")
-        >>> clip.close()
-        >>> with VideoFileClip("myMaskVideo.avi") as clip2:
-        ...     pass  # Implicit close called by context manager.
-
-
-    Parameters
-    ----------
-
-    filename:
-      The name of the video file, as a string or a path-like object.
-      It can have any extension supported by ffmpeg:
-      .ogv, .mp4, .mpeg, .avi, .mov etc.
-
-    has_mask:
-      Set this to 'True' if there is a mask included in the videofile.
-      Video files rarely contain masks, but some video codecs enable
-      that. For instance if you have a MoviePy VideoClip with a mask you
-      can save it to a videofile with a mask. (see also
-      ``VideoClip.write_videofile`` for more details).
-
-    audio:
-      Set to `False` if the clip doesn't have any audio or if you do not
-      wish to read the audio.
-
-    target_resolution:
-      Set to (desired_width, desired_height) to have ffmpeg resize the frames
-      before returning them. This is much faster than streaming in high-res
-      and then resizing. If either dimension is None, the frames are resized
-      by keeping the existing aspect ratio.
-
-    resize_algorithm:
-      The algorithm used for resizing. Default: "bicubic", other popular
-      options include "bilinear" and "fast_bilinear". For more information, see
-      https://ffmpeg.org/ffmpeg-scaler.html
-
-    fps_source:
-      The fps value to collect from the metadata. Set by default to 'fps', but
-      can be set to 'tbr', which may be helpful if the fps is being read
-      incorrectly from the file.
-
-    pixel_format
-      Optional: Pixel format for the video to read. If not specified,
-      'rgb24' will be used as the default format unless ``has_mask`` is set
-      as ``True``, then 'rgba' will be used.
-
-    is_mask
-      `True` if the clip is going to be used as a mask.
-
-
-    Attributes
-    ----------
-
-    filename:
-      Name of the original video file.
-
-    fps:
-      Frames per second in the original file.
-
-
-    Read docs for Clip() and VideoClip() for other, more generic, attributes.
-
-    Lifetime
-    --------
-
-    Note that this creates subprocesses and locks files. If you construct one
-    of these instances, you must call close() afterwards, or the subresources
-    will not be cleaned up until the process ends.
-
-    If copies are made, and close() is called on one, it may cause methods on
-    the other copies to fail.
- - """ - - @convert_path_to_string("filename") - def __init__( - self, - filename, - decode_file=False, - has_mask=False, - audio=True, - audio_buffersize=200000, - target_resolution=None, - resize_algorithm="bicubic", - audio_fps=44100, - audio_nbytes=2, - fps_source="fps", - pixel_format=None, - is_mask=False, - ): - VideoClip.__init__(self, is_mask=is_mask) - - # Make a reader - if not pixel_format: - pixel_format = "rgba" if has_mask else "rgb24" - self.reader = FFMPEG_VideoReader( - filename, - decode_file=decode_file, - pixel_format=pixel_format, - target_resolution=target_resolution, - resize_algo=resize_algorithm, - fps_source=fps_source, - ) - - # Make some of the reader's attributes accessible from the clip - self.duration = self.reader.duration - self.end = self.reader.duration - - self.fps = self.reader.fps - self.size = self.reader.size - self.rotation = self.reader.rotation - - self.filename = filename - - if has_mask: - self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3] - - def mask_make_frame(t): - return self.reader.get_frame(t)[:, :, 3] / 255.0 - - self.mask = VideoClip( - is_mask=True, make_frame=mask_make_frame - ).with_duration(self.duration) - self.mask.fps = self.fps - - else: - self.make_frame = lambda t: self.reader.get_frame(t) - - # Make a reader for the audio, if any. - if audio and self.reader.infos["audio_found"]: - self.audio = AudioFileClip( - filename, - buffersize=audio_buffersize, - fps=audio_fps, - nbytes=audio_nbytes, - ) - - def __deepcopy__(self, memo): - """Implements ``copy.deepcopy(clip)`` behaviour as ``copy.copy(clip)``. - - VideoFileClip class instances can't be deeply copied because the locked Thread - of ``proc`` isn't pickleable. Without this override, calls to - ``copy.deepcopy(clip)`` would raise a ``TypeError``: - - ``` - TypeError: cannot pickle '_thread.lock' object - ``` - """ - return self.__copy__() - -
[docs] def close(self): - """Close the internal reader.""" - if self.reader: - self.reader.close() - self.reader = None - - try: - if self.audio: - self.audio.close() - self.audio = None - except AttributeError: # pragma: no cover - pass
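A minimal usage sketch for the class above, following the Lifetime note (file name hypothetical):

from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip("myHolidays.mp4")  # hypothetical file
try:
    print(clip.fps, clip.size, clip.duration)
    frame = clip.get_frame(1.5)  # numpy array with the frame at t=1.5s
finally:
    clip.close()  # releases the ffmpeg subprocess and file locks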
\ No newline at end of file
diff --git a/_modules/moviepy/video/io/display_in_notebook.html b/_modules/moviepy/video/io/display_in_notebook.html
deleted file mode 100644
index 26b43060b..000000000
--- a/_modules/moviepy/video/io/display_in_notebook.html
+++ /dev/null
@@ -1,725 +0,0 @@
-moviepy.video.io.display_in_notebook — MoviePy documentation
Source code for moviepy.video.io.display_in_notebook

-"""Implements ``display_in_notebook``, a function to embed images/videos/audio in the
-Jupyter Notebook.
-"""
-
-# Notes:
-# All media are physically embedded in the Jupyter Notebook
-# (instead of simple links to the original files)
-# That is because most browsers use a cache system and they won't
-# properly refresh the media when the original files are changed.
-
-import inspect
-import os
-from base64 import b64encode
-
-from moviepy.audio.AudioClip import AudioClip
-from moviepy.tools import extensions_dict
-from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
-from moviepy.video.VideoClip import ImageClip, VideoClip
-
-
-try:  # pragma: no cover
-    from IPython.display import HTML
-
-    ipython_available = True
-
-    class HTML2(HTML):  # noqa D101
-        def __add__(self, other):
-            return HTML2(self.data + other.data)
-
-except ImportError:
-
-
[docs] def HTML2(content): # noqa D103 - return content
- - ipython_available = False - - -sorry = "Sorry, seems like your browser doesn't support HTML5 audio/video" -templates = { - "audio": ( - "<audio controls>" - "<source %(options)s src='data:audio/%(ext)s;base64,%(data)s'>" - + sorry - + "</audio>" - ), - "image": "<img %(options)s src='data:image/%(ext)s;base64,%(data)s'>", - "video": ( - "<video %(options)s" - "src='data:video/%(ext)s;base64,%(data)s' controls>" + sorry + "</video>" - ), -} - - -
-def html_embed(
-    clip, filetype=None, maxduration=60, rd_kwargs=None, center=True, **html_kwargs
-):
-    """Returns HTML5 code embedding the clip.
-
-    Parameters
-    ----------
-
-    clip : moviepy.Clip.Clip
-      Either a file name, or a clip to preview.
-      Either an image, a sound or a video. Clips will actually be
-      written to a file and embedded as if a filename was provided.
-
-    filetype : str, optional
-      One of 'video', 'image' or 'audio'. If None is given, it is inferred
-      from the extension of ``filename``, but this inference can fail.
-
-    maxduration : float, optional
-      An error will be raised if the clip's duration is more than the indicated
-      value (in seconds), to avoid flooding the browser's cache and RAM.
-
-    rd_kwargs : dict, optional
-      Keyword arguments for the rendering, like ``dict(fps=15, bitrate="50k")``.
-      Allows you to pass options to the render process. You can, for
-      example, disable the logger bar by passing ``dict(logger=None)``.
-
-    center : bool, optional
-      If true (default), the content will be wrapped in a
-      ``<div align=middle>`` HTML container, so the content will be displayed
-      at the center.
-
-    html_kwargs
-      Allows you to pass options such as ``width=260``, ``autoplay=True``,
-      ``loop=1`` etc.
-
-    Examples
-    --------
-
-    >>> from moviepy import *
-    >>> # later ...
-    >>> html_embed(clip, width=360)
-    >>> html_embed(clip.audio)
-
-    >>> clip.write_gif("test.gif")
-    >>> html_embed('test.gif')
-
-    >>> clip.save_frame("first_frame.jpeg")
-    >>> html_embed("first_frame.jpeg")
-    """
-    if rd_kwargs is None:  # pragma: no cover
-        rd_kwargs = {}
-
-    if "Clip" in str(clip.__class__):
-        TEMP_PREFIX = "__temp__"
-        if isinstance(clip, ImageClip):
-            filename = TEMP_PREFIX + ".png"
-            kwargs = {"filename": filename, "with_mask": True}
-            argnames = inspect.getfullargspec(clip.save_frame).args
-            kwargs.update(
-                {key: value for key, value in rd_kwargs.items() if key in argnames}
-            )
-            clip.save_frame(**kwargs)
-        elif isinstance(clip, VideoClip):
-            filename = TEMP_PREFIX + ".mp4"
-            kwargs = {"filename": filename, "preset": "ultrafast"}
-            kwargs.update(rd_kwargs)
-            clip.write_videofile(**kwargs)
-        elif isinstance(clip, AudioClip):
-            filename = TEMP_PREFIX + ".mp3"
-            kwargs = {"filename": filename}
-            kwargs.update(rd_kwargs)
-            clip.write_audiofile(**kwargs)
-        else:
-            raise ValueError("Unknown class for the clip. Cannot embed and preview.")
-
-        return html_embed(
-            filename,
-            maxduration=maxduration,
-            rd_kwargs=rd_kwargs,
-            center=center,
-            **html_kwargs,
-        )
-
-    filename = clip
-    options = " ".join(["%s='%s'" % (str(k), str(v)) for k, v in html_kwargs.items()])
-    name, ext = os.path.splitext(filename)
-    ext = ext[1:]
-
-    if filetype is None:
-        ext = filename.split(".")[-1].lower()
-        if ext == "gif":
-            filetype = "image"
-        elif ext in extensions_dict:
-            filetype = extensions_dict[ext]["type"]
-        else:
-            raise ValueError(
-                "No file type is known for the provided file. Please provide "
-                "argument `filetype` (one of 'image', 'video', 'sound') to the "
-                "display_in_notebook function."
-            )
-
-    if filetype == "video":
-        # The next lines set the HTML5-compatible extension and check that the
-        # extension is HTML5-valid
-        exts_htmltype = {"mp4": "mp4", "webm": "webm", "ogv": "ogg"}
-        allowed_exts = " ".join(exts_htmltype.keys())
-        try:
-            ext = exts_htmltype[ext]
-        except Exception:
-            raise ValueError(
-                "This video extension cannot be displayed in the "
-                "Jupyter Notebook. Allowed extensions: " + allowed_exts
-            )
-
-    if filetype in ["audio", "video"]:
-        duration = ffmpeg_parse_infos(filename, decode_file=True)["duration"]
-        if duration > maxduration:
-            raise ValueError(
-                (
-                    "The duration of video %s (%.1f) exceeds the 'maxduration'"
-                    " attribute. You can increase it by passing a larger"
-                    " 'maxduration' parameter to the display_in_notebook function."
-                    " But note that embedding large videos may take all the memory"
-                    " away!"
-                )
-                % (filename, duration)
-            )
-
-    with open(filename, "rb") as file:
-        data = b64encode(file.read()).decode("utf-8")
-
-    template = templates[filetype]
-
-    result = template % {"data": data, "options": options, "ext": ext}
-    if center:
-        result = r"<div align=middle>%s</div>" % result
-
-    return result
- - -
-def display_in_notebook(
-    clip,
-    filetype=None,
-    maxduration=60,
-    t=None,
-    fps=None,
-    rd_kwargs=None,
-    center=True,
-    **html_kwargs,
-):
-    """Displays clip content in a Jupyter Notebook.
-
-    Remarks: If your browser doesn't support HTML5, this should warn you.
-    If nothing is displayed, maybe your file or filename is wrong.
-    Important: The media will be physically embedded in the notebook.
-
-    Parameters
-    ----------
-
-    clip : moviepy.Clip.Clip
-      Either the name of a file, or a clip to preview. The clip will actually
-      be written to a file and embedded as if a filename was provided.
-
-    filetype : str, optional
-      One of ``"video"``, ``"image"`` or ``"audio"``. If None is given, it is
-      inferred from the extension of ``filename``, but this inference can fail.
-
-    maxduration : float, optional
-      An error will be raised if the clip's duration is more than the indicated
-      value (in seconds), to avoid flooding the browser's cache and RAM.
-
-    t : float, optional
-      If not None, only the frame at time t will be displayed in the notebook,
-      instead of a video of the clip.
-
-    fps : int, optional
-      Allows you to specify an fps, as required for clips whose fps is unknown.
-
-    rd_kwargs : dict, optional
-      Keyword arguments for the rendering, like ``dict(fps=15, bitrate="50k")``.
-      Allows you to pass options to the render process. You can, for
-      example, disable the logger bar by passing ``dict(logger=None)``.
-
-    center : bool, optional
-      If true (default), the content will be wrapped in a
-      ``<div align=middle>`` HTML container, so the content will be displayed
-      at the center.
-
-    kwargs
-      Allows you to pass options such as ``width=260``, etc. When editing
-      looping gifs, a good choice is ``loop=1, autoplay=1``.
-
-    Examples
-    --------
-
-    >>> from moviepy import *
-    >>> # later ...
-    >>> clip.display_in_notebook(width=360)
-    >>> clip.audio.display_in_notebook()
-
-    >>> clip.write_gif("test.gif")
-    >>> display_in_notebook('test.gif')
-
-    >>> clip.save_frame("first_frame.jpeg")
-    >>> display_in_notebook("first_frame.jpeg")
-    """
-    if not ipython_available:
-        raise ImportError("Only works inside a Jupyter Notebook")
-
-    if rd_kwargs is None:
-        rd_kwargs = {}
-
-    if fps is not None:
-        rd_kwargs["fps"] = fps
-
-    if t is not None:
-        clip = clip.to_ImageClip(t)
-
-    return HTML2(
-        html_embed(
-            clip,
-            filetype=filetype,
-            maxduration=maxduration,
-            center=center,
-            rd_kwargs=rd_kwargs,
-            **html_kwargs,
-        )
-    )
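A minimal notebook usage sketch for the two functions above, assuming `clip` is an existing VideoClip:

# Inside a Jupyter cell:
clip.display_in_notebook(width=360, rd_kwargs=dict(fps=15, logger=None))

# Embedding an already-rendered file instead of a clip:
display_in_notebook("test.gif", loop=1, autoplay=1)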
\ No newline at end of file
diff --git a/_modules/moviepy/video/io/ffmpeg_reader.html b/_modules/moviepy/video/io/ffmpeg_reader.html
deleted file mode 100644
index 5b73b96e1..000000000
--- a/_modules/moviepy/video/io/ffmpeg_reader.html
+++ /dev/null
@@ -1,1275 +0,0 @@
-moviepy.video.io.ffmpeg_reader — MoviePy documentation
Source code for moviepy.video.io.ffmpeg_reader

-"""Implements all the functions to read a video or a picture using ffmpeg."""
-
-import os
-import re
-import subprocess as sp
-import warnings
-
-import numpy as np
-
-from moviepy.config import FFMPEG_BINARY  # ffmpeg, ffmpeg.exe, etc...
-from moviepy.tools import convert_to_seconds, cross_platform_popen_params
-
-
-
[docs]class FFMPEG_VideoReader: - """Class for video byte-level reading with ffmpeg.""" - - def __init__( - self, - filename, - decode_file=True, - print_infos=False, - bufsize=None, - pixel_format="rgb24", - check_duration=True, - target_resolution=None, - resize_algo="bicubic", - fps_source="fps", - ): - self.filename = filename - self.proc = None - infos = ffmpeg_parse_infos( - filename, - check_duration=check_duration, - fps_source=fps_source, - decode_file=decode_file, - print_infos=print_infos, - ) - self.fps = infos["video_fps"] - self.size = infos["video_size"] - - # ffmpeg automatically rotates videos if rotation information is - # available, so exchange width and height - self.rotation = abs(infos.get("video_rotation", 0)) - if self.rotation in [90, 270]: - self.size = [self.size[1], self.size[0]] - - if target_resolution: - if None in target_resolution: - ratio = 1 - for idx, target in enumerate(target_resolution): - if target: - ratio = target / self.size[idx] - self.size = (int(self.size[0] * ratio), int(self.size[1] * ratio)) - else: - self.size = target_resolution - self.resize_algo = resize_algo - - self.duration = infos["video_duration"] - self.ffmpeg_duration = infos["duration"] - self.n_frames = infos["video_n_frames"] - self.bitrate = infos["video_bitrate"] - - self.infos = infos - - self.pixel_format = pixel_format - self.depth = 4 if pixel_format[-1] == "a" else 3 - # 'a' represents 'alpha' which means that each pixel has 4 values instead of 3. - # See https://github.com/Zulko/moviepy/issues/1070#issuecomment-644457274 - - if bufsize is None: - w, h = self.size - bufsize = self.depth * w * h + 100 - - self.bufsize = bufsize - self.initialize() - -
[docs] def initialize(self, start_time=0): - """ - Opens the file, creates the pipe. - - Sets self.pos to the appropriate value (1 if start_time == 0 because - it pre-reads the first frame). - """ - self.close(delete_lastread=False) # if any - - if start_time != 0: - offset = min(1, start_time) - i_arg = [ - "-ss", - "%.06f" % (start_time - offset), - "-i", - self.filename, - "-ss", - "%.06f" % offset, - ] - else: - i_arg = ["-i", self.filename] - - cmd = ( - [FFMPEG_BINARY] - + i_arg - + [ - "-loglevel", - "error", - "-f", - "image2pipe", - "-vf", - "scale=%d:%d" % tuple(self.size), - "-sws_flags", - self.resize_algo, - "-pix_fmt", - self.pixel_format, - "-vcodec", - "rawvideo", - "-", - ] - ) - popen_params = cross_platform_popen_params( - { - "bufsize": self.bufsize, - "stdout": sp.PIPE, - "stderr": sp.PIPE, - "stdin": sp.DEVNULL, - } - ) - self.proc = sp.Popen(cmd, **popen_params) - - # self.pos represents the (0-indexed) index of the frame that is next in line - # to be read by self.read_frame(). - # Eg when self.pos is 1, the 2nd frame will be read next. - self.pos = self.get_frame_number(start_time) - self.lastread = self.read_frame()
- -
[docs] def skip_frames(self, n=1): - """Reads and throws away n frames""" - w, h = self.size - for i in range(n): - self.proc.stdout.read(self.depth * w * h) - - # self.proc.stdout.flush() - self.pos += n
- -
[docs] def read_frame(self): - """ - Reads the next frame from the file. - Note that upon (re)initialization, the first frame will already have been read - and stored in ``self.lastread``. - """ - w, h = self.size - nbytes = self.depth * w * h - - s = self.proc.stdout.read(nbytes) - - if len(s) != nbytes: - warnings.warn( - ( - "In file %s, %d bytes wanted but %d bytes read at frame index" - " %d (out of a total %d frames), at time %.02f/%.02f sec." - " Using the last valid frame instead." - ) - % ( - self.filename, - nbytes, - len(s), - self.pos, - self.n_frames, - 1.0 * self.pos / self.fps, - self.duration, - ), - UserWarning, - ) - if not hasattr(self, "last_read"): - raise IOError( - ( - "MoviePy error: failed to read the first frame of " - f"video file {self.filename}. That might mean that the file is " - "corrupted. That may also mean that you are using " - "a deprecated version of FFMPEG. On Ubuntu/Debian " - "for instance the version in the repos is deprecated. " - "Please update to a recent version from the website." - ) - ) - - result = self.last_read - - else: - if hasattr(np, "frombuffer"): - result = np.frombuffer(s, dtype="uint8") - else: - result = np.fromstring(s, dtype="uint8") - result.shape = (h, w, len(s) // (w * h)) # reshape((h, w, len(s)//(w*h))) - self.last_read = result - - # We have to do this down here because `self.pos` is used in the warning above - self.pos += 1 - - return result
- -
-    def get_frame(self, t):
-        """Read a file video frame at time t.
-
-        Note for coders: getting an arbitrary frame in the video with
-        ffmpeg can be painfully slow if some decoding has to be done.
-        This function tries to avoid fetching arbitrary frames
-        whenever possible, by moving between adjacent frames.
-        """
-        # + 1 so that it represents the frame position that it will be
-        # after the frame is read. This makes the later comparisons easier.
-        pos = self.get_frame_number(t) + 1
-
-        # Initialize proc if it is not open
-        if not self.proc:
-            self.initialize(t)
-            return self.last_read
-
-        if pos == self.pos:
-            return self.last_read
-        elif (pos < self.pos) or (pos > self.pos + 100):
-            # We can't just skip forward to `pos` or it would take too long
-            self.initialize(t)
-            return self.lastread
-        else:
-            # If pos == self.pos + 1, this line has no effect
-            self.skip_frames(pos - self.pos - 1)
-            result = self.read_frame()
-            return result
- -
[docs] def get_frame_number(self, t): - """Helper method to return the frame number at time ``t``""" - # I used this horrible '+0.00001' hack because sometimes due to numerical - # imprecisions a 3.0 can become a 2.99999999... which makes the int() - # go to the previous integer. This makes the fetching more robust when you - # are getting the nth frame by writing get_frame(n/fps). - return int(self.fps * t + 0.00001)
- -
-    def close(self, delete_lastread=True):
-        """Closes the reader, terminating the process if it is still open."""
-        if self.proc:
-            if self.proc.poll() is None:
-                self.proc.terminate()
-                self.proc.stdout.close()
-                self.proc.stderr.close()
-                self.proc.wait()
-            self.proc = None
-        if delete_lastread and hasattr(self, "last_read"):
-            del self.last_read
- - def __del__(self): - self.close()
- - -
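To see why get_frame_number above nudges the product by 0.00001, a small worked check (values illustrative):

fps, t = 100.0, 4.35               # t was computed as frame 435 / fps
print(fps * t)                     # 434.99999999999994 due to float imprecision
print(int(fps * t))                # 434 -- one frame too early
print(int(fps * t + 0.00001))      # 435 -- the nudge restores the intended frame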
-def ffmpeg_read_image(filename, with_mask=True, pixel_format=None):
-    """Read an image file (PNG, BMP, JPEG...).
-
-    Wraps FFMPEG_VideoReader to read just one image.
-    Returns the image as a numpy array.
-
-    This function is not meant to be used directly in MoviePy.
-    Use ImageClip instead to make clips out of image files.
-
-    Parameters
-    ----------
-
-    filename
-      Name of the image file. Can be of any format supported by ffmpeg.
-
-    with_mask
-      If the image has a transparency layer, ``with_mask=True`` will read
-      this layer as the last channel of the returned array.
-
-    pixel_format
-      Optional: Pixel format for the image to read. If not specified,
-      'rgb24' will be used as the default format unless ``with_mask`` is set
-      as ``True``, then 'rgba' will be used.
-
-    """
-    if not pixel_format:
-        pixel_format = "rgba" if with_mask else "rgb24"
-    reader = FFMPEG_VideoReader(
-        filename, pixel_format=pixel_format, check_duration=False
-    )
-    im = reader.last_read
-    del reader
-    return im
- - -
[docs]class FFmpegInfosParser: - """Finite state ffmpeg `-i` command option file information parser. - Is designed to parse the output fast, in one loop. Iterates line by - line of the `ffmpeg -i <filename> [-f null -]` command output changing - the internal state of the parser. - - Parameters - ---------- - - filename - Name of the file parsed, only used to raise accurate error messages. - - infos - Information returned by FFmpeg. - - fps_source - Indicates what source data will be preferably used to retrieve fps data. - - check_duration - Enable or disable the parsing of the duration of the file. Useful to - skip the duration check, for example, for images. - - decode_file - Indicates if the whole file has been decoded. The duration parsing strategy - will differ depending on this argument. - """ - - def __init__( - self, - infos, - filename, - fps_source="fps", - check_duration=True, - decode_file=False, - ): - self.infos = infos - self.filename = filename - self.check_duration = check_duration - self.fps_source = fps_source - self.duration_tag_separator = "time=" if decode_file else "Duration: " - - self._reset_state() - - def _reset_state(self): - """Reinitializes the state of the parser. Used internally at - initialization and at the end of the parsing process. - """ - # could be 2 possible types of metadata: - # - file_metadata: Metadata of the container. Here are the tags set - # by the user using `-metadata` ffmpeg option - # - stream_metadata: Metadata for each stream of the container. - self._inside_file_metadata = False - - # this state is needed if `duration_tag_separator == "time="` because - # execution of ffmpeg decoding the whole file using `-f null -` appends - # to the output the blocks "Stream mapping:" and "Output:", which - # should be ignored - self._inside_output = False - - # flag which indicates that a default stream has not been found yet - self._default_stream_found = False - - # current input file, stream and chapter, which will be built at runtime - self._current_input_file = {"streams": []} - self._current_stream = None - self._current_chapter = None - - # resulting data of the parsing process - self.result = { - "video_found": False, - "audio_found": False, - "metadata": {}, - "inputs": [], - } - - # keep the value of latest metadata value parsed so we can build - # at next lines a multiline metadata value - self._last_metadata_field_added = None - -
[docs] def parse(self): - """Parses the information returned by FFmpeg in stderr executing their binary - for a file with ``-i`` option and returns a dictionary with all data needed - by MoviePy. - """ - # chapters by input file - input_chapters = [] - - for line in self.infos.splitlines()[1:]: - if ( - self.duration_tag_separator == "time=" - and self.check_duration - and "time=" in line - ): - # parse duration using file decodification - self.result["duration"] = self.parse_duration(line) - elif self._inside_output or line[0] != " ": - if self.duration_tag_separator == "time=" and not self._inside_output: - self._inside_output = True - # skip lines like "At least one output file must be specified" - elif not self._inside_file_metadata and line.startswith(" Metadata:"): - # enter " Metadata:" group - self._inside_file_metadata = True - elif line.startswith(" Duration:"): - # exit " Metadata:" group - self._inside_file_metadata = False - if self.check_duration and self.duration_tag_separator == "Duration: ": - self.result["duration"] = self.parse_duration(line) - - # parse global bitrate (in kb/s) - bitrate_match = re.search(r"bitrate: (\d+) kb/s", line) - self.result["bitrate"] = ( - int(bitrate_match.group(1)) if bitrate_match else None - ) - - # parse start time (in seconds) - start_match = re.search(r"start: (\d+\.?\d+)", line) - self.result["start"] = ( - float(start_match.group(1)) if start_match else None - ) - elif self._inside_file_metadata: - # file metadata line - field, value = self.parse_metadata_field_value(line) - - # multiline metadata value parsing - if field == "": - field = self._last_metadata_field_added - value = self.result["metadata"][field] + "\n" + value - else: - self._last_metadata_field_added = field - self.result["metadata"][field] = value - elif line.lstrip().startswith("Stream "): - # exit stream " Metadata:" - if self._current_stream: - self._current_input_file["streams"].append(self._current_stream) - - # get input number, stream number, language and type - main_info_match = re.search( - r"^Stream\s#(\d+):(\d+)(?:\[\w+\])?\(?(\w+)?\)?:\s(\w+):", - line.lstrip(), - ) - ( - input_number, - stream_number, - language, - stream_type, - ) = main_info_match.groups() - input_number = int(input_number) - stream_number = int(stream_number) - stream_type_lower = stream_type.lower() - - if language == "und": - language = None - - # start builiding the current stream - self._current_stream = { - "input_number": input_number, - "stream_number": stream_number, - "stream_type": stream_type_lower, - "language": language, - "default": not self._default_stream_found - or line.endswith("(default)"), - } - self._default_stream_found = True - - # for default streams, set their numbers globally, so it's - # easy to get without iterating all - if self._current_stream["default"]: - self.result[f"default_{stream_type_lower}_input_number"] = ( - input_number - ) - self.result[f"default_{stream_type_lower}_stream_number"] = ( - stream_number - ) - - # exit chapter - if self._current_chapter: - input_chapters[input_number].append(self._current_chapter) - self._current_chapter = None - - if "input_number" not in self._current_input_file: - # first input file - self._current_input_file["input_number"] = input_number - elif self._current_input_file["input_number"] != input_number: - # new input file - - # include their chapters if there are for this input file - if len(input_chapters) >= input_number + 1: - self._current_input_file["chapters"] = input_chapters[ - input_number - ] - - # 
add new input file to self.result - self.result["inputs"].append(self._current_input_file) - self._current_input_file = {"input_number": input_number} - - # parse relevant data by stream type - try: - global_data, stream_data = self.parse_data_by_stream_type( - stream_type, line - ) - except NotImplementedError as exc: - warnings.warn( - f"{str(exc)}\nffmpeg output:\n\n{self.infos}", UserWarning - ) - else: - self.result.update(global_data) - self._current_stream.update(stream_data) - elif line.startswith(" Metadata:"): - # enter group " Metadata:" - continue - elif self._current_stream: - # stream metadata line - if "metadata" not in self._current_stream: - self._current_stream["metadata"] = {} - - field, value = self.parse_metadata_field_value(line) - - if self._current_stream["stream_type"] == "video": - field, value = self.video_metadata_type_casting(field, value) - if field == "rotate": - self.result["video_rotation"] = value - - # multiline metadata value parsing - if field == "": - field = self._last_metadata_field_added - value = self._current_stream["metadata"][field] + "\n" + value - else: - self._last_metadata_field_added = field - self._current_stream["metadata"][field] = value - elif line.startswith(" Chapter"): - # Chapter data line - if self._current_chapter: - # there is a previews chapter? - if len(input_chapters) < self._current_chapter["input_number"] + 1: - input_chapters.append([]) - # include in the chapters by input matrix - input_chapters[self._current_chapter["input_number"]].append( - self._current_chapter - ) - - # extract chapter data - chapter_data_match = re.search( - r"^ Chapter #(\d+):(\d+): start (\d+\.?\d+?), end (\d+\.?\d+?)", - line, - ) - input_number, chapter_number, start, end = chapter_data_match.groups() - - # start building the chapter - self._current_chapter = { - "input_number": int(input_number), - "chapter_number": int(chapter_number), - "start": float(start), - "end": float(end), - } - elif self._current_chapter: - # inside chapter metadata - if "metadata" not in self._current_chapter: - self._current_chapter["metadata"] = {} - field, value = self.parse_metadata_field_value(line) - - # multiline metadata value parsing - if field == "": - field = self._last_metadata_field_added - value = self._current_chapter["metadata"][field] + "\n" + value - else: - self._last_metadata_field_added = field - self._current_chapter["metadata"][field] = value - - # last input file, must be included in self.result - if self._current_input_file: - self._current_input_file["streams"].append(self._current_stream) - # include their chapters, if there are - if len(input_chapters) == self._current_input_file["input_number"] + 1: - self._current_input_file["chapters"] = input_chapters[ - self._current_input_file["input_number"] - ] - self.result["inputs"].append(self._current_input_file) - - # some video duration utilities - if self.result["video_found"] and self.check_duration: - self.result["video_n_frames"] = int( - self.result["duration"] * self.result["video_fps"] - ) - self.result["video_duration"] = self.result["duration"] - else: - self.result["video_n_frames"] = 1 - self.result["video_duration"] = None - # We could have also recomputed duration from the number of frames, as follows: - # >>> result['video_duration'] = result['video_n_frames'] / result['video_fps'] - - # not default audio found, assume first audio stream is the default - if self.result["audio_found"] and not self.result.get("audio_bitrate"): - self.result["audio_bitrate"] = None - for 
streams_input in self.result["inputs"]: - for stream in streams_input["streams"]: - if stream["stream_type"] == "audio" and stream.get("bitrate"): - self.result["audio_bitrate"] = stream["bitrate"] - break - - if self.result["audio_bitrate"] is not None: - break - - result = self.result - - # reset state of the parser - self._reset_state() - - return result
- -
[docs] def parse_data_by_stream_type(self, stream_type, line): - """Parses data from "Stream ... {stream_type}" line.""" - try: - return { - "Audio": self.parse_audio_stream_data, - "Video": self.parse_video_stream_data, - "Data": lambda _line: ({}, {}), - }[stream_type](line) - except KeyError: - raise NotImplementedError( - f"{stream_type} stream parsing is not supported by moviepy and" - " will be ignored" - )
- -
[docs] def parse_audio_stream_data(self, line): - """Parses data from "Stream ... Audio" line.""" - global_data, stream_data = ({"audio_found": True}, {}) - try: - stream_data["fps"] = int(re.search(r" (\d+) Hz", line).group(1)) - except (AttributeError, ValueError): - # AttributeError: 'NoneType' object has no attribute 'group' - # ValueError: invalid literal for int() with base 10: '<string>' - stream_data["fps"] = "unknown" - match_audio_bitrate = re.search(r"(\d+) kb/s", line) - stream_data["bitrate"] = ( - int(match_audio_bitrate.group(1)) if match_audio_bitrate else None - ) - if self._current_stream["default"]: - global_data["audio_fps"] = stream_data["fps"] - global_data["audio_bitrate"] = stream_data["bitrate"] - return (global_data, stream_data)
- -
[docs] def parse_video_stream_data(self, line): - """Parses data from "Stream ... Video" line.""" - global_data, stream_data = ({"video_found": True}, {}) - - try: - match_video_size = re.search(r" (\d+)x(\d+)[,\s]", line) - if match_video_size: - # size, of the form 460x320 (w x h) - stream_data["size"] = [int(num) for num in match_video_size.groups()] - except Exception: - raise IOError( - ( - "MoviePy error: failed to read video dimensions in" - " file '%s'.\nHere are the file infos returned by" - "ffmpeg:\n\n%s" - ) - % (self.filename, self.infos) - ) - - match_bitrate = re.search(r"(\d+) kb/s", line) - stream_data["bitrate"] = int(match_bitrate.group(1)) if match_bitrate else None - - # Get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes - # tbc, and sometimes tbc/2... - # Current policy: Trust fps first, then tbr unless fps_source is - # specified as 'tbr' in which case try tbr then fps - - # If result is near from x*1000/1001 where x is 23,24,25,50, - # replace by x*1000/1001 (very common case for the fps). - - if self.fps_source == "fps": - try: - fps = self.parse_fps(line) - except (AttributeError, ValueError): - fps = self.parse_tbr(line) - elif self.fps_source == "tbr": - try: - fps = self.parse_tbr(line) - except (AttributeError, ValueError): - fps = self.parse_fps(line) - else: - raise ValueError( - ("fps source '%s' not supported parsing the video '%s'") - % (self.fps_source, self.filename) - ) - - # It is known that a fps of 24 is often written as 24000/1001 - # but then ffmpeg nicely rounds it to 23.98, which we hate. - coef = 1000.0 / 1001.0 - for x in [23, 24, 25, 30, 50]: - if (fps != x) and abs(fps - x * coef) < 0.01: - fps = x * coef - stream_data["fps"] = fps - - if self._current_stream["default"] or "video_size" not in self.result: - global_data["video_size"] = stream_data.get("size", None) - if self._current_stream["default"] or "video_bitrate" not in self.result: - global_data["video_bitrate"] = stream_data.get("bitrate", None) - if self._current_stream["default"] or "video_fps" not in self.result: - global_data["video_fps"] = stream_data["fps"] - - return (global_data, stream_data)
- -
[docs] def parse_fps(self, line): - """Parses number of FPS from a line of the ``ffmpeg -i`` command output.""" - return float(re.search(r" (\d+.?\d*) fps", line).group(1))
- -
-    def parse_tbr(self, line):
-        """Parses the TBR (frame rate estimate) from a line of the
-        ``ffmpeg -i`` command output.
-        """
-        s_tbr = re.search(r" (\d+.?\d*k?) tbr", line).group(1)
-
-        # Sometimes comes as e.g. 12k. We need to replace that with 12000.
-        if s_tbr[-1] == "k":
-            tbr = float(s_tbr[:-1]) * 1000
-        else:
-            tbr = float(s_tbr)
-        return tbr
- -
[docs] def parse_duration(self, line): - """Parse the duration from the line that outputs the duration of - the container. - """ - try: - time_raw_string = line.split(self.duration_tag_separator)[-1] - match_duration = re.search( - r"([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])", - time_raw_string, - ) - return convert_to_seconds(match_duration.group(1)) - except Exception: - raise IOError( - ( - "MoviePy error: failed to read the duration of file '%s'.\n" - "Here are the file infos returned by ffmpeg:\n\n%s" - ) - % (self.filename, self.infos) - )
- -
[docs] def parse_metadata_field_value( - self, - line, - ): - """Returns a tuple with a metadata field-value pair given a ffmpeg `-i` - command output line. - """ - raw_field, raw_value = line.split(":", 1) - return (raw_field.strip(" "), raw_value.strip(" "))
- -
[docs] def video_metadata_type_casting(self, field, value): - """Cast needed video metadata fields to other types than the default str.""" - if field == "rotate": - return (field, float(value)) - return (field, value)
- - -
-def ffmpeg_parse_infos(
-    filename,
-    check_duration=True,
-    fps_source="fps",
-    decode_file=False,
-    print_infos=False,
-):
-    """Get the information of a file using ffmpeg.
-
-    Returns a dictionary with the following fields:
-
-    - ``"duration"``
-    - ``"metadata"``
-    - ``"inputs"``
-    - ``"video_found"``
-    - ``"video_fps"``
-    - ``"video_n_frames"``
-    - ``"video_duration"``
-    - ``"video_bitrate"``
-    - ``"video_metadata"``
-    - ``"audio_found"``
-    - ``"audio_fps"``
-    - ``"audio_bitrate"``
-    - ``"audio_metadata"``
-
-    Note that "video_duration" is slightly smaller than "duration" to avoid
-    fetching the incomplete frames at the end, which raises an error.
-
-    Parameters
-    ----------
-
-    filename
-      Name of the file to parse.
-
-    fps_source
-      Indicates which source data will be preferred to retrieve fps data.
-
-    check_duration
-      Enable or disable the parsing of the duration of the file. Useful to
-      skip the duration check, for example, for images.
-
-    decode_file
-      Indicates if the whole file must be read to retrieve its duration.
-      This is needed for some files in order to get the correct duration (see
-      https://github.com/Zulko/moviepy/pull/1222).
-
-    print_infos
-      If ``True``, prints the whole info text returned by ffmpeg.
-    """
-    # Open the file in a pipe, read output
-    cmd = [FFMPEG_BINARY, "-hide_banner", "-i", filename]
-    if decode_file:
-        cmd.extend(["-f", "null", "-"])
-
-    popen_params = cross_platform_popen_params(
-        {
-            "bufsize": 10**5,
-            "stdout": sp.PIPE,
-            "stderr": sp.PIPE,
-            "stdin": sp.DEVNULL,
-        }
-    )
-
-    proc = sp.Popen(cmd, **popen_params)
-    (output, error) = proc.communicate()
-    infos = error.decode("utf8", errors="ignore")
-
-    proc.terminate()
-    del proc
-
-    if print_infos:
-        # print the whole info text returned by FFMPEG
-        print(infos)
-
-    try:
-        return FFmpegInfosParser(
-            infos,
-            filename,
-            fps_source=fps_source,
-            check_duration=check_duration,
-            decode_file=decode_file,
-        ).parse()
-    except Exception as exc:
-        if os.path.isdir(filename):
-            raise IsADirectoryError(f"'{filename}' is a directory")
-        elif not os.path.exists(filename):
-            raise FileNotFoundError(f"'{filename}' not found")
-        raise IOError(f"Error parsing `ffmpeg -i` command output:\n\n{infos}") from exc
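A minimal usage sketch for ffmpeg_parse_infos (file name hypothetical):

from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos

infos = ffmpeg_parse_infos("myHolidays.mp4")
print(infos["duration"], infos["video_fps"], infos["video_size"])
print(infos["audio_found"], infos.get("audio_fps"))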
\ No newline at end of file
diff --git a/_modules/moviepy/video/io/ffmpeg_tools.html b/_modules/moviepy/video/io/ffmpeg_tools.html
deleted file mode 100644
index 9009ca80f..000000000
--- a/_modules/moviepy/video/io/ffmpeg_tools.html
+++ /dev/null
@@ -1,644 +0,0 @@
-moviepy.video.io.ffmpeg_tools — MoviePy documentation
Source code for moviepy.video.io.ffmpeg_tools

-"""Miscellaneous bindings to ffmpeg."""
-
-import os
-
-from moviepy.config import FFMPEG_BINARY
-from moviepy.decorators import convert_parameter_to_seconds, convert_path_to_string
-from moviepy.tools import subprocess_call
-
-
-
[docs]@convert_path_to_string(("inputfile", "outputfile")) -@convert_parameter_to_seconds(("start_time", "end_time")) -def ffmpeg_extract_subclip( - inputfile, start_time, end_time, outputfile=None, logger="bar" -): - """Makes a new video file playing video file between two times. - - Parameters - ---------- - - inputfile : str - Path to the file from which the subclip will be extracted. - - start_time : float - Moment of the input clip that marks the start of the produced subclip. - - end_time : float - Moment of the input clip that marks the end of the produced subclip. - - outputfile : str, optional - Path to the output file. Defaults to - ``<inputfile_name>SUB<start_time>_<end_time><ext>``. - """ - if not outputfile: - name, ext = os.path.splitext(inputfile) - t1, t2 = [int(1000 * t) for t in [start_time, end_time]] - outputfile = "%sSUB%d_%d%s" % (name, t1, t2, ext) - - cmd = [ - FFMPEG_BINARY, - "-y", - "-ss", - "%0.2f" % start_time, - "-i", - inputfile, - "-t", - "%0.2f" % (end_time - start_time), - "-map", - "0", - "-vcodec", - "copy", - "-acodec", - "copy", - "-copyts", - outputfile, - ] - subprocess_call(cmd, logger=logger)
- - -
[docs]@convert_path_to_string(("videofile", "audiofile", "outputfile")) -def ffmpeg_merge_video_audio( - videofile, - audiofile, - outputfile, - video_codec="copy", - audio_codec="copy", - logger="bar", -): - """Merges video file and audio file into one movie file. - - Parameters - ---------- - - videofile : str - Path to the video file used in the merge. - - audiofile : str - Path to the audio file used in the merge. - - outputfile : str - Path to the output file. - - video_codec : str, optional - Video codec used by FFmpeg in the merge. - - audio_codec : str, optional - Audio codec used by FFmpeg in the merge. - """ - cmd = [ - FFMPEG_BINARY, - "-y", - "-i", - audiofile, - "-i", - videofile, - "-vcodec", - video_codec, - "-acodec", - audio_codec, - outputfile, - ] - - subprocess_call(cmd, logger=logger)
- - -
[docs]@convert_path_to_string(("inputfile", "outputfile")) -def ffmpeg_extract_audio(inputfile, outputfile, bitrate=3000, fps=44100, logger="bar"): - """Extract the sound from a video file and save it in ``outputfile``. - - Parameters - ---------- - - inputfile : str - The path to the file from which the audio will be extracted. - - outputfile : str - The path to the file to which the audio will be stored. - - bitrate : int, optional - Bitrate for the new audio file. - - fps : int, optional - Frame rate for the new audio file. - """ - cmd = [ - FFMPEG_BINARY, - "-y", - "-i", - inputfile, - "-ab", - "%dk" % bitrate, - "-ar", - "%d" % fps, - outputfile, - ] - subprocess_call(cmd, logger=logger)
- - -
[docs]@convert_path_to_string(("inputfile", "outputfile")) -def ffmpeg_resize(inputfile, outputfile, size, logger="bar"): - """Resizes a file to new size and write the result in another. - - Parameters - ---------- - - inputfile : str - Path to the file to be resized. - - outputfile : str - Path to the output file. - - size : list or tuple - New size in format ``[width, height]`` for the output file. - """ - cmd = [ - FFMPEG_BINARY, - "-i", - inputfile, - "-vf", - "scale=%d:%d" % (size[0], size[1]), - outputfile, - ] - - subprocess_call(cmd, logger=logger)
- - -
[docs]@convert_path_to_string(("inputfile", "outputfile", "output_dir")) -def ffmpeg_stabilize_video( - inputfile, outputfile=None, output_dir="", overwrite_file=True, logger="bar" -): - """ - Stabilizes ``filename`` and write the result to ``output``. - - Parameters - ---------- - - inputfile : str - The name of the shaky video. - - outputfile : str, optional - The name of new stabilized video. Defaults to appending '_stabilized' to - the input file name. - - output_dir : str, optional - The directory to place the output video in. Defaults to the current - working directory. - - overwrite_file : bool, optional - If ``outputfile`` already exists in ``output_dir``, then overwrite - ``outputfile`` Defaults to True. - """ - if not outputfile: - without_dir = os.path.basename(inputfile) - name, ext = os.path.splitext(without_dir) - outputfile = f"{name}_stabilized{ext}" - - outputfile = os.path.join(output_dir, outputfile) - cmd = [FFMPEG_BINARY, "-i", inputfile, "-vf", "deshake", outputfile] - if overwrite_file: - cmd.append("-y") - subprocess_call(cmd, logger=logger)
\ No newline at end of file
diff --git a/_modules/moviepy/video/io/ffmpeg_writer.html b/_modules/moviepy/video/io/ffmpeg_writer.html
deleted file mode 100644
index 2367f06fe..000000000
--- a/_modules/moviepy/video/io/ffmpeg_writer.html
+++ /dev/null
@@ -1,776 +0,0 @@
-moviepy.video.io.ffmpeg_writer — MoviePy documentation
Source code for moviepy.video.io.ffmpeg_writer

-"""
-On the long term this will implement several methods to make videos
-out of VideoClips
-"""
-
-import subprocess as sp
-
-import numpy as np
-from proglog import proglog
-
-from moviepy.config import FFMPEG_BINARY
-from moviepy.tools import cross_platform_popen_params
-
-
-
[docs]class FFMPEG_VideoWriter: - """A class for FFMPEG-based video writing. - - Parameters - ---------- - - filename : str - Any filename like ``"video.mp4"`` etc. but if you want to avoid - complications it is recommended to use the generic extension ``".avi"`` - for all your videos. - - size : tuple or list - Size of the output video in pixels (width, height). - - fps : int - Frames per second in the output video file. - - codec : str, optional - FFMPEG codec. It seems that in terms of quality the hierarchy is - 'rawvideo' = 'png' > 'mpeg4' > 'libx264' - 'png' manages the same lossless quality as 'rawvideo' but yields - smaller files. Type ``ffmpeg -codecs`` in a terminal to get a list - of accepted codecs. - - Note for default 'libx264': by default the pixel format yuv420p - is used. If the video dimensions are not both even (e.g. 720x405) - another pixel format is used, and this can cause problem in some - video readers. - - audiofile : str, optional - The name of an audio file that will be incorporated to the video. - - preset : str, optional - Sets the time that FFMPEG will take to compress the video. The slower, - the better the compression rate. Possibilities are: ``"ultrafast"``, - ``"superfast"``, ``"veryfast"``, ``"faster"``, ``"fast"``, ``"medium"`` - (default), ``"slow"``, ``"slower"``, ``"veryslow"``, ``"placebo"``. - - bitrate : str, optional - Only relevant for codecs which accept a bitrate. "5000k" offers - nice results in general. - - with_mask : bool, optional - Set to ``True`` if there is a mask in the video to be encoded. - - pixel_format : str, optional - Optional: Pixel format for the output video file. If is not specified - ``"rgb24"`` will be used as the default format unless ``with_mask`` is - set as ``True``, then ``"rgba"`` will be used. - - logfile : int, optional - File descriptor for logging output. If not defined, ``subprocess.PIPE`` - will be used. Defined using another value, the log level of the ffmpeg - command will be "info", otherwise "error". - - threads : int, optional - Number of threads used to write the output with ffmpeg. - - ffmpeg_params : list, optional - Additional parameters passed to ffmpeg command. 
- """ - - def __init__( - self, - filename, - size, - fps, - codec="libx264", - audiofile=None, - preset="medium", - bitrate=None, - with_mask=False, - logfile=None, - threads=None, - ffmpeg_params=None, - pixel_format=None, - ): - if logfile is None: - logfile = sp.PIPE - self.logfile = logfile - self.filename = filename - self.codec = codec - self.ext = self.filename.split(".")[-1] - if not pixel_format: # pragma: no cover - pixel_format = "rgba" if with_mask else "rgb24" - - # order is important - cmd = [ - FFMPEG_BINARY, - "-y", - "-loglevel", - "error" if logfile == sp.PIPE else "info", - "-f", - "rawvideo", - "-vcodec", - "rawvideo", - "-s", - "%dx%d" % (size[0], size[1]), - "-pix_fmt", - pixel_format, - "-r", - "%.02f" % fps, - "-an", - "-i", - "-", - ] - if audiofile is not None: - cmd.extend(["-i", audiofile, "-acodec", "copy"]) - cmd.extend(["-vcodec", codec, "-preset", preset]) - if ffmpeg_params is not None: - cmd.extend(ffmpeg_params) - if bitrate is not None: - cmd.extend(["-b", bitrate]) - - if threads is not None: - cmd.extend(["-threads", str(threads)]) - - if (codec == "libx264") and (size[0] % 2 == 0) and (size[1] % 2 == 0): - cmd.extend(["-pix_fmt", "yuv420p"]) - cmd.extend([filename]) - - popen_params = cross_platform_popen_params( - {"stdout": sp.DEVNULL, "stderr": logfile, "stdin": sp.PIPE} - ) - - self.proc = sp.Popen(cmd, **popen_params) - -
-    def write_frame(self, img_array):
-        """Writes one frame in the file."""
-        try:
-            self.proc.stdin.write(img_array.tobytes())
-        except IOError as err:
-            _, ffmpeg_error = self.proc.communicate()
-            if ffmpeg_error is not None:
-                ffmpeg_error = ffmpeg_error.decode()
-            else:
-                # The error was redirected to a logfile with `write_logfile=True`,
-                # so read the error from that file instead
-                self.logfile.seek(0)
-                ffmpeg_error = self.logfile.read()
-
-            error = (
-                f"{err}\n\nMoviePy error: FFMPEG encountered the following error while "
-                f"writing file {self.filename}:\n\n {ffmpeg_error}"
-            )
-
-            if "Unknown encoder" in ffmpeg_error:
-                error += (
-                    "\n\nThe video export failed because FFMPEG didn't find the "
-                    f"specified codec for video encoding {self.codec}. "
-                    "Please install this codec or change the codec when calling "
-                    "write_videofile.\nFor instance:\n"
-                    "  >>> clip.write_videofile('myvid.webm', codec='libvpx')"
-                )
-
-            elif "incorrect codec parameters ?" in ffmpeg_error:
-                error += (
-                    "\n\nThe video export failed, possibly because the codec "
-                    f"specified for the video {self.codec} is not compatible with "
-                    f"the given extension {self.ext}.\n"
-                    "Please specify a valid 'codec' argument in write_videofile.\n"
-                    "This would be 'libx264' or 'mpeg4' for mp4, "
-                    "'libtheora' for ogv, 'libvpx' for webm.\n"
-                    "Another possible reason is that the audio codec was not "
-                    "compatible with the video codec. For instance, the video "
-                    "extensions 'ogv' and 'webm' only allow 'libvorbis' (default) "
-                    "as an audio codec."
-                )
-
-            elif "bitrate not specified" in ffmpeg_error:
-                error += (
-                    "\n\nThe video export failed, possibly because the bitrate "
-                    "specified was too high or too low for the video codec."
-                )
-
-            elif "Invalid encoder type" in ffmpeg_error:
-                error += (
-                    "\n\nThe video export failed because the codec "
-                    "or file extension you provided is not suitable for video"
-                )
-
-            raise IOError(error)
- -
-    def close(self):
-        """Closes the writer, terminating the subprocess if it is still alive."""
-        if self.proc:
-            self.proc.stdin.close()
-            if self.proc.stderr is not None:
-                self.proc.stderr.close()
-            self.proc.wait()
-
-        self.proc = None
- - # Support the Context Manager protocol, to ensure that resources are cleaned up. - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close()
- - -
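A minimal sketch writing two seconds of synthetic frames with the writer class above; file name and parameters are illustrative:

import numpy as np
from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter

size, fps = (320, 240), 24  # even dimensions, so libx264 can use yuv420p
with FFMPEG_VideoWriter("noise.mp4", size, fps) as writer:
    for _ in range(2 * fps):
        # One random rgb24 frame of shape (height, width, 3)
        frame = np.random.randint(0, 256, (size[1], size[0], 3), dtype=np.uint8)
        writer.write_frame(frame)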
[docs]def ffmpeg_write_video( - clip, - filename, - fps, - codec="libx264", - bitrate=None, - preset="medium", - write_logfile=False, - audiofile=None, - threads=None, - ffmpeg_params=None, - logger="bar", - pixel_format=None, -): - """Write the clip to a videofile. See VideoClip.write_videofile for details - on the parameters. - """ - logger = proglog.default_bar_logger(logger) - - if write_logfile: - logfile = open(filename + ".log", "w+") - else: - logfile = None - logger(message="MoviePy - Writing video %s\n" % filename) - if not pixel_format: - pixel_format = "rgba" if clip.mask is not None else "rgb24" - with FFMPEG_VideoWriter( - filename, - clip.size, - fps, - codec=codec, - preset=preset, - bitrate=bitrate, - logfile=logfile, - audiofile=audiofile, - threads=threads, - ffmpeg_params=ffmpeg_params, - pixel_format=pixel_format, - ) as writer: - for t, frame in clip.iter_frames( - logger=logger, with_times=True, fps=fps, dtype="uint8" - ): - if clip.mask is not None: - mask = 255 * clip.mask.get_frame(t) - if mask.dtype != "uint8": - mask = mask.astype("uint8") - frame = np.dstack([frame, mask]) - - writer.write_frame(frame) - - if write_logfile: - logfile.close() - logger(message="MoviePy - Done !")
- - -
[docs]def ffmpeg_write_image(filename, image, logfile=False, pixel_format=None): - """Writes an image (HxWx3 or HxWx4 numpy array) to a file, using ffmpeg. - - Parameters - ---------- - - filename : str - Path to the output file. - - image : np.ndarray - Numpy array with the image data. - - logfile : bool, optional - Writes the ffmpeg output inside a logging file (``True``) or not - (``False``). - - pixel_format : str, optional - Pixel format for ffmpeg. If not defined, it will be discovered checking - if the image data contains an alpha channel (``"rgba"``) or not - (``"rgb24"``). - """ - if image.dtype != "uint8": - image = image.astype("uint8") - if not pixel_format: - pixel_format = "rgba" if (image.shape[2] == 4) else "rgb24" - - cmd = [ - FFMPEG_BINARY, - "-y", - "-s", - "%dx%d" % (image.shape[:2][::-1]), - "-f", - "rawvideo", - "-pix_fmt", - pixel_format, - "-i", - "-", - filename, - ] - - if logfile: - log_file = open(filename + ".log", "w+") - else: - log_file = sp.PIPE - - popen_params = cross_platform_popen_params( - {"stdout": sp.DEVNULL, "stderr": log_file, "stdin": sp.PIPE} - ) - - proc = sp.Popen(cmd, **popen_params) - out, err = proc.communicate(image.tobytes()) - - if proc.returncode: - error = ( - f"{err}\n\nMoviePy error: FFMPEG encountered the following error while " - f"writing file {filename} with command {cmd}:\n\n {err.decode()}" - ) - - raise IOError(error) - - del proc
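A minimal sketch for ffmpeg_write_image (file name hypothetical):

import numpy as np
from moviepy.video.io.ffmpeg_writer import ffmpeg_write_image

# A solid red 64x64 RGB image (HxWx3, uint8), written out through ffmpeg.
image = np.zeros((64, 64, 3), dtype=np.uint8)
image[:, :, 0] = 255
ffmpeg_write_image("red.png", image)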
\ No newline at end of file
diff --git a/_modules/moviepy/video/io/ffplay_previewer.html b/_modules/moviepy/video/io/ffplay_previewer.html
deleted file mode 100644
index dadb2bf8b..000000000
--- a/_modules/moviepy/video/io/ffplay_previewer.html
+++ /dev/null
@@ -1,581 +0,0 @@
-moviepy.video.io.ffplay_previewer — MoviePy documentation
Source code for moviepy.video.io.ffplay_previewer

-"""
-On the long term this will implement several methods to make videos
-out of VideoClips
-"""
-
-import subprocess as sp
-
-from moviepy.config import FFPLAY_BINARY
-from moviepy.tools import cross_platform_popen_params
-
-
-
[docs]class FFPLAY_VideoPreviewer: - """A class for FFPLAY-based video preview. - - Parameters - ---------- - - size : tuple or list - Size of the output video in pixels (width, height). - - fps : int - Frames per second in the output video file. - - pixel_format : str - Pixel format for the output video file, ``rgb24`` for normal video, ``rgba`` - if video with mask. - """ - - def __init__( - self, - size, - fps, - pixel_format, - ): - # order is important - cmd = [ - FFPLAY_BINARY, - "-autoexit", # If you dont precise, ffplay dont stop at end - "-f", - "rawvideo", - "-pixel_format", - pixel_format, - "-video_size", - "%dx%d" % (size[0], size[1]), - "-framerate", - "%.02f" % fps, - "-", - ] - - popen_params = cross_platform_popen_params( - {"stdout": sp.DEVNULL, "stderr": sp.STDOUT, "stdin": sp.PIPE} - ) - - self.proc = sp.Popen(cmd, **popen_params) - -
[docs]    def show_frame(self, img_array):
        """Sends one frame to the ffplay subprocess for display."""
        try:
            self.proc.stdin.write(img_array.tobytes())
        except IOError as err:
            _, ffplay_error = self.proc.communicate()
            if ffplay_error is not None:
                ffplay_error = ffplay_error.decode()

            error = (
                f"{err}\n\nMoviePy error: FFPLAY encountered the following error while "
                f"previewing clip:\n\n {ffplay_error}"
            )

            raise IOError(error)
- -
[docs] def close(self): - """Closes the writer, terminating the subprocess if is still alive.""" - if self.proc: - self.proc.stdin.close() - if self.proc.stderr is not None: - self.proc.stderr.close() - self.proc.wait() - - self.proc = None
- - # Support the Context Manager protocol, to ensure that resources are cleaned up. - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close()
- - -
[docs]def ffplay_preview_video(
    clip, fps, pixel_format="rgb24", audio_flag=None, video_flag=None
):
    """Preview the clip using ffplay. See VideoClip.preview for details
    on the parameters.

    Parameters
    ----------

    clip : VideoClip
      The clip to preview.

    fps : int
      Number of frames per second in the displayed video.

    pixel_format : str, optional
      Warning: this parameter is not used anywhere in the code and should
      probably be removed. Pixel format ``rgb24`` is believed not to work
      properly for now, because it requires applying the mask on
      CompositeVideoClip, which is believed not to be working yet.

      Pixel format for the displayed video: ``rgb24`` for normal video,
      ``rgba`` for video with a mask.

    audio_flag : threading.Event, optional
      A thread event that the video will wait for. If not provided, audio
      is ignored.

    video_flag : threading.Event, optional
      A thread event that the video will set after the first frame has been
      shown. If not provided, it is simply ignored.
    """
    with FFPLAY_VideoPreviewer(clip.size, fps, pixel_format) as previewer:
        first_frame = True
        for t, frame in clip.iter_frames(with_times=True, fps=fps, dtype="uint8"):
            previewer.show_frame(frame)

            # After the first frame is shown, if we have audio/video flags,
            # mark the video as ready and wait for the audio.
            if first_frame:
                first_frame = False

                if video_flag:
                    video_flag.set()  # tell the audio: video is ready

                if audio_flag:
                    audio_flag.wait()  # wait for the audio to be ready
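A minimal sketch of calling the previewer directly (normally you would just use ``clip.preview()``); the input path is a placeholder and ``ffplay`` must be installed:

.. code-block:: python

    from moviepy import VideoFileClip
    from moviepy.video.io.ffplay_previewer import ffplay_preview_video

    clip = VideoFileClip("media/clip.mp4")  # placeholder input file
    # Previews only the video track; audio is synchronized separately by
    # VideoClip.preview through the audio_flag/video_flag events.
    ffplay_preview_video(clip, fps=24)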
diff --git a/_modules/moviepy/video/io/gif_writers.html b/_modules/moviepy/video/io/gif_writers.html
deleted file mode 100644
index b5da2b37f..000000000
--- a/_modules/moviepy/video/io/gif_writers.html
+++ /dev/null
@@ -1,464 +0,0 @@

Source code for moviepy.video.io.gif_writers
-"""MoviePy video GIFs writing."""
-
-import imageio.v3 as iio
-import proglog
-
-from moviepy.decorators import requires_duration, use_clip_fps_by_default
-
-
-
[docs]@requires_duration
@use_clip_fps_by_default
def write_gif_with_imageio(clip, filename, fps=None, loop=0, logger="bar"):
    """Writes the gif with the Python library ImageIO (through its Pillow plugin)."""
    logger = proglog.default_bar_logger(logger)

    with iio.imopen(filename, "w", plugin="pillow") as writer:
        logger(message="MoviePy - Building file %s with imageio." % filename)
        for frame in clip.iter_frames(fps=fps, logger=logger, dtype="uint8"):
            # `duration` is expressed in milliseconds, not seconds.
            writer.write(frame, duration=1000 / fps, loop=loop)
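A short usage sketch with a placeholder input file; because of ``requires_duration``, the clip must have a defined duration:

.. code-block:: python

    from moviepy import VideoFileClip
    from moviepy.video.io.gif_writers import write_gif_with_imageio

    clip = VideoFileClip("media/clip.mp4").with_subclip(0, 2)  # placeholder file
    write_gif_with_imageio(clip, "clip.gif", fps=10, loop=0)  # loop=0: loop forever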
diff --git a/_modules/moviepy/video/tools/credits.html b/_modules/moviepy/video/tools/credits.html
deleted file mode 100644
index 0d2c1dbda..000000000
--- a/_modules/moviepy/video/tools/credits.html
+++ /dev/null
@@ -1,585 +0,0 @@

Source code for moviepy.video.tools.credits
-"""Contains different functions to make end and opening credits, even though it is
-difficult to fill everyone needs in this matter.
-"""
-
-from moviepy.decorators import convert_path_to_string
-from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
-from moviepy.video.fx.Resize import Resize
-from moviepy.video.VideoClip import ImageClip, TextClip
-
-
-
[docs]class CreditsClip(TextClip): - """Credits clip. - - Parameters - ---------- - - creditfile - A string or path like object pointing to a text file - whose content must be as follows: :: - - # This is a comment - # The next line says : leave 4 blank lines - .blank 4 - - ..Executive Story Editor - MARCEL DURAND - - ..Associate Producers - MARTIN MARCEL - DIDIER MARTIN - - ..Music Supervisor - JEAN DIDIER - - - width - Total width of the credits text in pixels - - gap - Horizontal gap in pixels between the jobs and the names - - color - Color of the text. See ``TextClip.list('color')`` - for a list of acceptable names. - - font - Name of the font to use. See ``TextClip.list('font')`` for - the list of fonts you can use on your computer. - - font_size - Size of font to use - - stroke_color - Color of the stroke (=contour line) of the text. If ``None``, - there will be no stroke. - - stroke_width - Width of the stroke, in pixels. Can be a float, like 1.5. - - bg_color - Color of the background. If ``None``, the background will be transparent. - - Returns - ------- - - image - An ImageClip instance that looks like this and can be scrolled - to make some credits: :: - - Executive Story Editor MARCEL DURAND - Associate Producers MARTIN MARCEL - DIDIER MARTIN - Music Supervisor JEAN DIDIER - - """ - - @convert_path_to_string("creditfile") - def __init__( - self, - creditfile, - width, - color="white", - stroke_color="black", - stroke_width=2, - font="Impact-Normal", - font_size=60, - bg_color=None, - gap=0, - ): - # Parse the .txt file - texts = [] - one_line = True - - with open(creditfile) as file: - for line in file: - if line.startswith(("\n", "#")): - # exclude blank lines or comments - continue - elif line.startswith(".blank"): - # ..blank n - for i in range(int(line.split(" ")[1])): - texts.append(["\n", "\n"]) - elif line.startswith(".."): - texts.append([line[2:], ""]) - one_line = True - elif one_line: - texts.append(["", line]) - one_line = False - else: - texts.append(["\n", line]) - - left, right = ("".join(line) for line in zip(*texts)) - - # Make two columns for the credits - left, right = [ - TextClip( - text=txt, - color=color, - stroke_color=stroke_color, - stroke_width=stroke_width, - font=font, - font_size=font_size, - text_align=align, - ) - for txt, align in [(left, "left"), (right, "right")] - ] - - both_columns = CompositeVideoClip( - [left, right.with_position((left.w + gap, 0))], - size=(left.w + right.w + gap, right.h), - bg_color=bg_color, - ) - - # Scale to the required size - scaled = both_columns.with_effects([Resize(width=width)]) - - # Transform the CompositeVideoClip into an ImageClip - - # Calls ImageClip.__init__() - super(TextClip, self).__init__(scaled.get_frame(0)) - self.mask = ImageClip(scaled.mask.get_frame(0), is_mask=True)
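A minimal sketch of building and writing a credits clip; ``credits.txt`` (in the format described above) and the font path are placeholders, and since ``CreditsClip`` behaves like an ``ImageClip`` it needs a duration before writing:

.. code-block:: python

    from moviepy.video.tools.credits import CreditsClip

    credits = CreditsClip(
        "credits.txt",        # placeholder credit file
        width=600,
        font="./example.ttf",  # placeholder font path
    )
    credits.with_duration(5).write_videofile("credits.mp4", fps=24)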
diff --git a/_modules/moviepy/video/tools/cuts.html b/_modules/moviepy/video/tools/cuts.html
deleted file mode 100644
index ba4416070..000000000
--- a/_modules/moviepy/video/tools/cuts.html
+++ /dev/null
@@ -1,956 +0,0 @@

Source code for moviepy.video.tools.cuts
-"""Contains everything that can help automate the cuts in MoviePy."""
-
-from collections import defaultdict
-
-import numpy as np
-
-from moviepy.decorators import convert_parameter_to_seconds, use_clip_fps_by_default
-
-
-
[docs]@use_clip_fps_by_default
@convert_parameter_to_seconds(["start_time"])
def find_video_period(clip, fps=None, start_time=0.3):
    """Find the period of a video based on frames correlation.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
      Clip for which the video period will be computed.

    fps : int, optional
      Number of frames per second used to compute the period. Higher values
      will produce more accurate periods, but the execution time will be
      longer.

    start_time : float, optional
      First timeframe used to calculate the period of the clip.

    Examples
    --------

    >>> from moviepy import *
    >>> from moviepy.video.tools.cuts import find_video_period
    >>>
    >>> clip = VideoFileClip("media/chaplin.mp4").with_subclip(0, 1).loop(2)
    >>> round(find_video_period(clip, fps=80), 6)
    1
    """

    def frame(t):
        return clip.get_frame(t).flatten()

    timings = np.arange(start_time, clip.duration, 1 / fps)[1:]
    ref = frame(0)
    corrs = [np.corrcoef(ref, frame(t))[0, 1] for t in timings]
    return timings[np.argmax(corrs)]
- - -
[docs]class FramesMatch: - """Frames match inside a set of frames. - - Parameters - ---------- - - start_time : float - Starting time. - - end_time : float - End time. - - min_distance : float - Lower bound on the distance between the first and last frames - - max_distance : float - Upper bound on the distance between the first and last frames - """ - - def __init__(self, start_time, end_time, min_distance, max_distance): - self.start_time = start_time - self.end_time = end_time - self.min_distance = min_distance - self.max_distance = max_distance - self.time_span = end_time - start_time - - def __str__(self): # pragma: no cover - return "(%.04f, %.04f, %.04f, %.04f)" % ( - self.start_time, - self.end_time, - self.min_distance, - self.max_distance, - ) - - def __repr__(self): # pragma: no cover - return self.__str__() - - def __iter__(self): # pragma: no cover - return iter( - (self.start_time, self.end_time, self.min_distance, self.max_distance) - ) - - def __eq__(self, other): - return ( - other.start_time == self.start_time - and other.end_time == self.end_time - and other.min_distance == self.min_distance - and other.max_distance == self.max_distance - )
- - -
[docs]class FramesMatches(list): - """Frames matches inside a set of frames. - - You can instantiate it passing a list of FramesMatch objects or - using the class methods ``load`` and ``from_clip``. - - Parameters - ---------- - - lst : list - Iterable of FramesMatch objects. - """ - - def __init__(self, lst): - list.__init__(self, sorted(lst, key=lambda e: e.max_distance)) - -
[docs] def best(self, n=1, percent=None): - """Returns a new instance of FramesMatches object or a FramesMatch - from the current class instance given different conditions. - - By default returns the first FramesMatch that the current instance - stores. - - Parameters - ---------- - - n : int, optional - Number of matches to retrieve from the current FramesMatches object. - Only has effect when ``percent=None``. - - percent : float, optional - Percent of the current match to retrieve. - - Returns - ------- - - FramesMatch or FramesMatches : If the number of matches to retrieve is - greater than 1 returns a FramesMatches object, otherwise a - FramesMatch. - - """ - if percent is not None: - n = len(self) * percent / 100 - return self[0] if n == 1 else FramesMatches(self[: int(n)])
- -
[docs]    def filter(self, condition):
        """Return a FramesMatches object obtained by filtering out the
        FramesMatch objects which do not satisfy a condition.

        Parameters
        ----------

        condition : func
          Function which takes a FramesMatch object as parameter and returns
          a bool.

        Examples
        --------
        >>> # Only keep the matches corresponding to (> 1 second) sequences.
        >>> new_matches = matches.filter(lambda match: match.time_span > 1)
        """
        return FramesMatches(filter(condition, self))
- -
[docs] def save(self, filename): - """Save a FramesMatches object to a file. - - Parameters - ---------- - - filename : str - Path to the file in which will be dumped the FramesMatches object data. - """ - np.savetxt( - filename, - np.array([np.array(list(e)) for e in self]), - fmt="%.03f", - delimiter="\t", - )
- -
[docs] @staticmethod - def load(filename): - """Load a FramesMatches object from a file. - - Parameters - ---------- - - filename : str - Path to the file to use loading a FramesMatches object. - - Examples - -------- - >>> matching_frames = FramesMatches.load("somefile") - """ - arr = np.loadtxt(filename) - mfs = [FramesMatch(*e) for e in arr] - return FramesMatches(mfs)
- -
[docs] @staticmethod - def from_clip(clip, distance_threshold, max_duration, fps=None, logger="bar"): - """Finds all the frames that look alike in a clip, for instance to make - a looping GIF. - - Parameters - ---------- - - clip : moviepy.video.VideoClip.VideoClip - A MoviePy video clip. - - distance_threshold : float - Distance above which a match is rejected. - - max_duration : float - Maximal duration (in seconds) between two matching frames. - - fps : int, optional - Frames per second (default will be ``clip.fps``). - - logger : str, optional - Either ``"bar"`` for progress bar or ``None`` or any Proglog logger. - - Returns - ------- - - FramesMatches - All pairs of frames with ``end_time - start_time < max_duration`` - and whose distance is under ``distance_threshold``. - - Examples - -------- - - We find all matching frames in a given video and turn the best match - with a duration of 1.5 seconds or more into a GIF: - - >>> from moviepy import VideoFileClip - >>> from moviepy.video.tools.cuts import FramesMatches - >>> - >>> clip = VideoFileClip("foo.mp4").resize(width=200) - >>> matches = FramesMatches.from_clip( - ... clip, distance_threshold=10, max_duration=3, # will take time - ... ) - >>> best = matches.filter(lambda m: m.time_span > 1.5).best() - >>> clip.with_subclip(best.start_time, best.end_time).write_gif("foo.gif") - """ - N_pixels = clip.w * clip.h * 3 - - def dot_product(F1, F2): - return (F1 * F2).sum() / N_pixels - - frame_dict = {} # will store the frames and their mutual distances - - def distance(t1, t2): - uv = dot_product(frame_dict[t1]["frame"], frame_dict[t2]["frame"]) - u, v = frame_dict[t1]["|F|sq"], frame_dict[t2]["|F|sq"] - return np.sqrt(u + v - 2 * uv) - - matching_frames = [] # the final result. - - for t, frame in clip.iter_frames(with_times=True, logger=logger): - flat_frame = 1.0 * frame.flatten() - F_norm_sq = dot_product(flat_frame, flat_frame) - F_norm = np.sqrt(F_norm_sq) - - for t2 in list(frame_dict.keys()): - # forget old frames, add 't' to the others frames - # check for early rejections based on differing norms - if (t - t2) > max_duration: - frame_dict.pop(t2) - else: - frame_dict[t2][t] = { - "min": abs(frame_dict[t2]["|F|"] - F_norm), - "max": frame_dict[t2]["|F|"] + F_norm, - } - frame_dict[t2][t]["rejected"] = ( - frame_dict[t2][t]["min"] > distance_threshold - ) - - t_F = sorted(frame_dict.keys()) - - frame_dict[t] = {"frame": flat_frame, "|F|sq": F_norm_sq, "|F|": F_norm} - - for i, t2 in enumerate(t_F): - # Compare F(t) to all the previous frames - - if frame_dict[t2][t]["rejected"]: - continue - - dist = distance(t, t2) - frame_dict[t2][t]["min"] = frame_dict[t2][t]["max"] = dist - frame_dict[t2][t]["rejected"] = dist >= distance_threshold - - for t3 in t_F[i + 1 :]: - # For all the next times t3, use d(F(t), F(end_time)) to - # update the bounds on d(F(t), F(t3)). See if you can - # conclude on whether F(t) and F(t3) match. - t3t, t2t3 = frame_dict[t3][t], frame_dict[t2][t3] - t3t["max"] = min(t3t["max"], dist + t2t3["max"]) - t3t["min"] = max(t3t["min"], dist - t2t3["max"], t2t3["min"] - dist) - - if t3t["min"] > distance_threshold: - t3t["rejected"] = True - - # Store all the good matches (end_time,t) - matching_frames += [ - (t1, t, frame_dict[t1][t]["min"], frame_dict[t1][t]["max"]) - for t1 in frame_dict - if (t1 != t) and not frame_dict[t1][t]["rejected"] - ] - - return FramesMatches([FramesMatch(*e) for e in matching_frames])
- -
[docs] def select_scenes( - self, match_threshold, min_time_span, nomatch_threshold=None, time_distance=0 - ): - """Select the scenes at which a video clip can be reproduced as the - smoothest possible way, mainly oriented for the creation of GIF images. - - Parameters - ---------- - - match_threshold : float - Maximum distance possible between frames. The smaller, the - better-looping the GIFs are. - - min_time_span : float - Minimum duration for a scene. Only matches with a duration longer - than the value passed to this parameters will be extracted. - - nomatch_threshold : float, optional - Minimum distance possible between frames. If is ``None``, then it is - chosen equal to ``match_threshold``. - - time_distance : float, optional - Minimum time offset possible between matches. - - Returns - ------- - - FramesMatches : New instance of the class with the selected scenes. - - Examples - -------- - - >>> from pprint import pprint - >>> from moviepy import * - >>> from moviepy.video.tools.cuts import FramesMatches - >>> - >>> ch_clip = VideoFileClip("media/chaplin.mp4").with_subclip(1, 4) - >>> mirror_and_clip = [ch_clip.with_effects([vfx.TimeMirror()]), ch_clip] - >>> clip = concatenate_videoclips(mirror_and_clip) - >>> - >>> result = FramesMatches.from_clip(clip, 10, 3).select_scenes( - ... 1, 2, nomatch_threshold=0, - ... ) - >>> print(result) - [(1.0000, 4.0000, 0.0000, 0.0000), - (1.1600, 3.8400, 0.0000, 0.0000), - (1.2800, 3.7200, 0.0000, 0.0000), - (1.4000, 3.6000, 0.0000, 0.0000)] - """ - if nomatch_threshold is None: - nomatch_threshold = match_threshold - - dict_starts = defaultdict(lambda: []) - for start, end, min_distance, max_distance in self: - dict_starts[start].append([end, min_distance, max_distance]) - - starts_ends = sorted(dict_starts.items(), key=lambda k: k[0]) - - result = [] - min_start = 0 - for start, ends_distances in starts_ends: - if start < min_start: - continue - - ends = [end for (end, min_distance, max_distance) in ends_distances] - great_matches = [ - (end, min_distance, max_distance) - for (end, min_distance, max_distance) in ends_distances - if max_distance < match_threshold - ] - - great_long_matches = [ - (end, min_distance, max_distance) - for (end, min_distance, max_distance) in great_matches - if (end - start) > min_time_span - ] - - if not great_long_matches: - continue # No GIF can be made starting at this time - - poor_matches = { - end - for (end, min_distance, max_distance) in ends_distances - if min_distance > nomatch_threshold - } - short_matches = {end for end in ends if (end - start) <= 0.6} - - if not poor_matches.intersection(short_matches): - continue - - end = max(end for (end, min_distance, max_distance) in great_long_matches) - end, min_distance, max_distance = next( - e for e in great_long_matches if e[0] == end - ) - - result.append(FramesMatch(start, end, min_distance, max_distance)) - min_start = start + time_distance - - return FramesMatches(result)
- -
[docs] def write_gifs(self, clip, gifs_dir, **kwargs): - """Extract the matching frames represented by the instance from a clip - and write them as GIFs in a directory, one GIF for each matching frame. - - Parameters - ---------- - - clip : video.VideoClip.VideoClip - A video clip whose frames scenes you want to obtain as GIF images. - - gif_dir : str - Directory in which the GIF images will be written. - - kwargs - Passed as ``clip.write_gif`` optional arguments. - - Examples - -------- - - >>> import os - >>> from pprint import pprint - >>> from moviepy import * - >>> from moviepy.video.tools.cuts import FramesMatches - >>> - >>> ch_clip = VideoFileClip("media/chaplin.mp4").with_subclip(1, 4) - >>> clip = concatenate_videoclips([ch_clip.time_mirror(), ch_clip]) - >>> - >>> result = FramesMatches.from_clip(clip, 10, 3).select_scenes( - ... 1, 2, nomatch_threshold=0, - ... ) - >>> - >>> os.mkdir("foo") - >>> result.write_gifs(clip, "foo") - MoviePy - Building file foo/00000100_00000400.gif with imageio. - MoviePy - Building file foo/00000115_00000384.gif with imageio. - MoviePy - Building file foo/00000128_00000372.gif with imageio. - MoviePy - Building file foo/00000140_00000360.gif with imageio. - """ - for start, end, _, _ in self: - name = "%s/%08d_%08d.gif" % (gifs_dir, 100 * start, 100 * end) - clip.with_subclip(start, end).write_gif(name, **kwargs)
- - -
[docs]@use_clip_fps_by_default -def detect_scenes( - clip=None, luminosities=None, luminosity_threshold=10, logger="bar", fps=None -): - """Detects scenes of a clip based on luminosity changes. - - Note that for large clip this may take some time. - - Returns - ------- - - tuple : cuts, luminosities - cuts is a series of cuts [(0,t1), (t1,t2),...(...,tf)] - luminosities are the luminosities computed for each - frame of the clip. - - Parameters - ---------- - - clip : video.VideoClip.VideoClip, optional - A video clip. Can be None if a list of luminosities is - provided instead. If provided, the luminosity of each - frame of the clip will be computed. If the clip has no - 'fps' attribute, you must provide it. - - luminosities : list, optional - A list of luminosities, e.g. returned by detect_scenes - in a previous run. - - luminosity_threshold : float, optional - Determines a threshold above which the 'luminosity jumps' - will be considered as scene changes. A scene change is defined - as a change between 2 consecutive frames that is larger than - (avg * thr) where avg is the average of the absolute changes - between consecutive frames. - - logger : str, optional - Either ``"bar"`` for progress bar or ``None`` or any Proglog logger. - - fps : int, optional - Frames per second value. Must be provided if you provide - no clip or a clip without fps attribute. - """ - if luminosities is None: - luminosities = [ - f.sum() for f in clip.iter_frames(fps=fps, dtype="uint32", logger=logger) - ] - - luminosities = np.array(luminosities, dtype=float) - if clip is not None: - end = clip.duration - else: - end = len(luminosities) * (1.0 / fps) - luminosity_diffs = abs(np.diff(luminosities)) - avg = luminosity_diffs.mean() - luminosity_jumps = ( - 1 + np.array(np.nonzero(luminosity_diffs > luminosity_threshold * avg))[0] - ) - timings = [0] + list((1.0 / fps) * luminosity_jumps) + [end] - cuts = [(t1, t2) for t1, t2 in zip(timings, timings[1:])] - return cuts, luminosities
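A short sketch of scene detection on a placeholder file:

.. code-block:: python

    from moviepy import VideoFileClip
    from moviepy.video.tools.cuts import detect_scenes

    clip = VideoFileClip("media/clip.mp4")  # placeholder input file
    cuts, luminosities = detect_scenes(clip, fps=10, logger=None)
    for start, end in cuts:
        print("scene from %.2fs to %.2fs" % (start, end))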
diff --git a/_modules/moviepy/video/tools/drawing.html b/_modules/moviepy/video/tools/drawing.html
deleted file mode 100644
index 431b17212..000000000
--- a/_modules/moviepy/video/tools/drawing.html
+++ /dev/null
@@ -1,773 +0,0 @@

Source code for moviepy.video.tools.drawing
-"""Deals with making images (np arrays). It provides drawing
-methods that are difficult to do with the existing Python libraries.
-"""
-
-import numpy as np
-
-
-
[docs]def blit(im1, im2, pos=None, mask=None):
    """Blit an image over another.

    Blits ``im1`` onto ``im2`` at position ``pos=(x, y)``, using the
    ``mask`` if provided.
    """
    if pos is None:
        pos = (0, 0)  # pragma: no cover
    else:
        # Cast to tuple in case pos is not subscriptable.
        pos = tuple(pos)
    im2.paste(im1, pos, mask)
    return im2
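Note that, despite the module docstring talking about numpy arrays, ``blit`` pastes Pillow images (it calls ``Image.paste``). A tiny sketch:

.. code-block:: python

    from PIL import Image
    from moviepy.video.tools.drawing import blit

    background = Image.new("RGB", (100, 100), (0, 0, 0))
    patch = Image.new("RGB", (20, 20), (255, 0, 0))
    result = blit(patch, background, pos=(40, 40))  # red square at (40, 40)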
- - -
[docs]def color_gradient(
    size,
    p1,
    p2=None,
    vector=None,
    radius=None,
    color_1=0.0,
    color_2=1.0,
    shape="linear",
    offset=0,
):
    """Draw a linear, bilinear, or radial gradient.

    The result is a picture of size ``size``, whose color varies
    gradually from color ``color_1`` in position ``p1`` to color ``color_2``
    in position ``p2``.

    If it is an RGB picture, the result must be transformed into
    a 'uint8' array to be displayed normally.

    Parameters
    ----------

    size : tuple or list
      Size (width, height) in pixels of the final image array.

    p1 : tuple or list
      Position for the first coordinate of the gradient in pixels (x, y).
      The color 'before' ``p1`` is ``color_1`` and it gradually changes in
      the direction of ``p2`` until it is ``color_2`` when it reaches ``p2``.

    p2 : tuple or list, optional
      Position for the second coordinate of the gradient in pixels (x, y).
      Coordinates (x, y) of the limit point for ``color_1``
      and ``color_2``.

    vector : tuple or list, optional
      A vector (x, y) in pixels that can be provided instead of ``p2``.
      ``p2`` is then defined as (p1 + vector).

    color_1 : tuple or list, optional
      Starting color for the gradient. As default, black. Either floats
      between 0 and 1 (for gradients used in masks) or [R, G, B] arrays
      (for colored gradients).

    color_2 : tuple or list, optional
      Color for the second point in the gradient. As default, white. Either
      floats between 0 and 1 (for gradients used in masks) or [R, G, B]
      arrays (for colored gradients).

    shape : str, optional
      Shape of the gradient. Can be either ``"linear"``, ``"bilinear"`` or
      ``"radial"``. In a linear gradient the color varies in one direction,
      from point ``p1`` to point ``p2``. In a bilinear gradient it also
      varies symmetrically from ``p1`` in the other direction. In a radial
      gradient it goes from ``color_1`` to ``color_2`` in all directions.

    radius : float, optional
      If ``shape="radial"``, the radius of the gradient is defined with the
      parameter ``radius``, in pixels.

    offset : float, optional
      Real number between 0 and 1 indicating the fraction of the vector
      at which the gradient actually starts. For instance if ``offset``
      is 0.9 in a gradient going from p1 to p2, then the gradient will
      only occur near p2 (before that everything is of color ``color_1``).
      If the offset is 0.9 in a radial gradient, the gradient will
      occur in the region located between 90% and 100% of the radius;
      this creates a blurry disc of radius ``d(p1, p2)``.

    Returns
    -------

    image
      A Numpy array of dimensions (width, height, n_colors) of type float
      representing the image of the gradient.

    Examples
    --------

    >>> color_gradient((10, 1), (0, 0), p2=(10, 0))  # from white to black
    [[1.  0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]]
    >>>
    >>> color_gradient(  # from red to green
    ...     (10, 1),  # size
    ...     (0, 0),  # p1
    ...     p2=(10, 0),
    ...     color_1=(255, 0, 0),  # red
    ...     color_2=(0, 255, 0),  # green
    ... )
    [[[  0.  255.    0. ]
      [ 25.5 229.5   0. ]
      [ 51.  204.    0. ]
      [ 76.5 178.5   0. ]
      [102.  153.    0. ]
      [127.5 127.5   0. ]
      [153.  102.    0. ]
      [178.5  76.5   0. ]
      [204.   51.    0. ]
      [229.5  25.5   0. ]]]
    """
    # np-arrayize and change x,y coordinates to y,x
    w, h = size

    color_1 = np.array(color_1).astype(float)
    color_2 = np.array(color_2).astype(float)

    if shape == "bilinear":
        if vector is None:
            if p2 is None:
                raise ValueError("You must provide either 'p2' or 'vector'")
            vector = np.array(p2) - np.array(p1)

        m1, m2 = [
            color_gradient(
                size,
                p1,
                vector=v,
                color_1=1.0,
                color_2=0.0,
                shape="linear",
                offset=offset,
            )
            for v in [vector, [-v for v in vector]]
        ]

        arr = np.maximum(m1, m2)
        if color_1.size > 1:
            arr = np.dstack(3 * [arr])
        return arr * color_1 + (1 - arr) * color_2

    p1 = np.array(p1[::-1]).astype(float)

    M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)

    if shape == "linear":
        if vector is None:
            if p2 is not None:
                vector = np.array(p2[::-1]) - p1
            else:
                raise ValueError("You must provide either 'p2' or 'vector'")
        else:
            vector = np.array(vector[::-1])

        norm = np.linalg.norm(vector)
        n_vec = vector / norm**2  # a vector of norm 1/norm(vector)

        p1 = p1 + offset * vector
        arr = (M - p1).dot(n_vec) / (1 - offset)
        arr = np.minimum(1, np.maximum(0, arr))
        if color_1.size > 1:
            arr = np.dstack(3 * [arr])
        return arr * color_1 + (1 - arr) * color_2

    elif shape == "radial":
        if (radius or 0) == 0:
            arr = np.ones((h, w))
        else:
            arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius
            arr = arr / ((1 - offset) * radius)
            arr = np.minimum(1.0, np.maximum(0, arr))

        if color_1.size > 1:
            arr = np.dstack(3 * [arr])
        return (1 - arr) * color_1 + arr * color_2
    raise ValueError("Invalid shape, should be either 'radial', 'linear' or 'bilinear'")
- - -
[docs]def color_split(
    size,
    x=None,
    y=None,
    p1=None,
    p2=None,
    vector=None,
    color_1=0,
    color_2=1.0,
    gradient_width=0,
):
    """Make an image split in 2 colored regions.

    Returns an array of size ``size`` divided in two regions called 1 and
    2 in what follows, which will have colors ``color_1`` and ``color_2``
    respectively.

    Parameters
    ----------

    x : int, optional
      If provided, the image is split horizontally in x, the left
      region being region 1.

    y : int, optional
      If provided, the image is split vertically in y, the top region
      being region 1.

    p1, p2 : tuple or list, optional
      Positions (x1, y1), (x2, y2) in pixels, where the numbers can be
      floats. Region 1 is defined as the whole region on the left when
      going from ``p1`` to ``p2``.

    p1, vector : tuple or list, optional
      ``p1`` is (x1, y1) and vector (v1, v2), where the numbers can be
      floats. Region 1 is then the region on the left when starting
      in position ``p1`` and going in the direction given by ``vector``.

    gradient_width : float, optional
      If not zero, the split is not sharp, but gradual over a region of
      width ``gradient_width`` (in pixels). This is preferable in many
      situations (for instance for antialiasing).

    Examples
    --------

    >>> size = [200, 200]
    >>>
    >>> # an image with all pixels with x<50 =0, the others =1
    >>> color_split(size, x=50, color_1=0, color_2=1)
    >>>
    >>> # an image with all pixels with y<50 red, the others green
    >>> color_split(size, y=50, color_1=[255, 0, 0], color_2=[0, 255, 0])
    >>>
    >>> # An image split along an arbitrary line (see below)
    >>> color_split(size, p1=[20, 50], p2=[25, 70], color_1=0, color_2=1)
    """
    if gradient_width or ((x is None) and (y is None)):
        if p2 is not None:
            vector = np.array(p2) - np.array(p1)
        elif x is not None:
            vector = np.array([0, -1.0])
            p1 = np.array([x, 0])
        elif y is not None:
            vector = np.array([1.0, 0.0])
            p1 = np.array([0, y])

        x, y = vector
        vector = np.array([y, -x]).astype("float")
        norm = np.linalg.norm(vector)
        vector = max(0.1, gradient_width) * vector / norm
        return color_gradient(
            size, p1, vector=vector, color_1=color_1, color_2=color_2, shape="linear"
        )
    else:
        w, h = size
        shape = (h, w) if np.isscalar(color_1) else (h, w, len(color_1))
        arr = np.zeros(shape)
        if x:
            arr[:, :x] = color_1
            arr[:, x:] = color_2
        elif y:
            arr[:y] = color_1
            arr[y:] = color_2
        return arr
- - -
[docs]def circle(screensize, center, radius, color=1.0, bg_color=0, blur=1): - """Draw an image with a circle. - - Draws a circle of color ``color``, on a background of color ``bg_color``, - on a screen of size ``screensize`` at the position ``center=(x, y)``, - with a radius ``radius`` but slightly blurred on the border by ``blur`` - pixels. - - Parameters - ---------- - - screensize : tuple or list - Size of the canvas. - - center : tuple or list - Center of the circle. - - radius : float - Radius of the circle, in pixels. - - bg_color : tuple or float, optional - Color for the background of the canvas. As default, black. - - blur : float, optional - Blur for the border of the circle. - - Examples - -------- - - >>> from moviepy.video.tools.drawing import circle - >>> - >>> circle( - ... (5, 5), # size - ... (2, 2), # center - ... 2, # radius - ... ) - array([[0. , 0. , 0. , 0. , 0. ], - [0. , 0.58578644, 1. , 0.58578644, 0. ], - [0. , 1. , 1. , 1. , 0. ], - [0. , 0.58578644, 1. , 0.58578644, 0. ], - [0. , 0. , 0. , 0. , 0. ]]) - """ - offset = 1.0 * (radius - blur) / radius if radius else 0 - return color_gradient( - screensize, - p1=center, - radius=radius, - color_1=color, - color_2=bg_color, - shape="radial", - offset=offset, - )
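These helpers combine naturally with masks. A small sketch using ``circle`` to build a vignette-style mask (the sizes are arbitrary):

.. code-block:: python

    from moviepy import ImageClip
    from moviepy.video.tools.drawing import circle

    # Values lie between 0 and 1, so the array can be used directly as a mask.
    mask_array = circle((640, 360), center=(320, 180), radius=150, blur=30)
    mask = ImageClip(mask_array, is_mask=True)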
diff --git a/_modules/moviepy/video/tools/interpolators.html b/_modules/moviepy/video/tools/interpolators.html
deleted file mode 100644
index 627fe35fe..000000000
--- a/_modules/moviepy/video/tools/interpolators.html
+++ /dev/null
@@ -1,680 +0,0 @@

Source code for moviepy.video.tools.interpolators
-"""Classes for easy interpolation of trajectories and curves."""
-
-import numpy as np
-
-
-
[docs]class Interpolator:
    """Poor man's linear interpolator.

    Parameters
    ----------

    tt : list, optional
      List of time frames for the interpolator.

    ss : list, optional
      List of values for the interpolator.

    ttss : list, optional
      List of time frames and their corresponding values for the
      interpolator. This argument can be used instead of ``tt`` and ``ss``
      to instantiate the interpolator using a single argument.

    left : float, optional
      Value to return when ``t < tt[0]``.

    right : float, optional
      Value to return when ``t > tt[-1]``.


    Examples
    --------

    >>> # instantiate using `tt` and `ss`
    >>> interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5])
    >>>
    >>> # instantiate using `ttss`
    >>> interpolator = Interpolator(ttss=[[0, 3], [1, 4], [2, 5]])  # [t, value]
    """

    def __init__(self, tt=None, ss=None, ttss=None, left=None, right=None):
        if ttss is not None:
            tt, ss = zip(*ttss)

        self.tt = 1.0 * np.array(tt)
        self.ss = 1.0 * np.array(ss)
        self.left = left
        self.right = right
        self.tmin, self.tmax = min(tt), max(tt)

    def __call__(self, t):
        """Interpolates ``t``.

        Parameters
        ----------

        t : float
          Time frame for which the corresponding value will be returned.
        """
        return np.interp(t, self.tt, self.ss, self.left, self.right)
- - -
[docs]class Trajectory:
    """Trajectory composed of time frames and (x, y) pixel positions.

    It's designed as an interpolator, so you can get the position at a given
    time ``t``. You can instantiate it from a file using the methods
    ``from_file`` and ``load_list``.


    Parameters
    ----------

    tt : list or numpy.ndarray
      Time frames.

    xx : list or numpy.ndarray
      X positions in the trajectory.

    yy : list or numpy.ndarray
      Y positions in the trajectory.


    Examples
    --------

    >>> trajectory = Trajectory([0, .166, .333], [554, 474, 384], [100, 90, 91])
    """

    def __init__(self, tt, xx, yy):
        self.tt = 1.0 * np.array(tt)
        self.xx = np.array(xx)
        self.yy = np.array(yy)
        self.update_interpolators()

    def __call__(self, t):
        """Interpolates the trajectory at the given time ``t``.

        Parameters
        ----------

        t : float
          Time for which the corresponding position will be returned.
        """
        return np.array([self.xi(t), self.yi(t)])
[docs] def addx(self, x): - """Adds a value to the ``xx`` position of the trajectory. - - Parameters - ---------- - - x : int - Value added to ``xx`` in the trajectory. - - - Returns - ------- - - Trajectory : new instance with the new X position included. - """ - return Trajectory(self.tt, self.xx + x, self.yy)
- -
[docs] def addy(self, y): - """Adds a value to the ``yy`` position of the trajectory. - - Parameters - ---------- - - y : int - Value added to ``yy`` in the trajectory. - - - Returns - ------- - - Trajectory : new instance with the new Y position included. - """ - return Trajectory(self.tt, self.xx, self.yy + y)
- -
[docs] def update_interpolators(self): - """Updates the internal X and Y position interpolators for the instance.""" - self.xi = Interpolator(self.tt, self.xx) - self.yi = Interpolator(self.tt, self.yy)
- -
[docs]    def txy(self, tms=False):
        """Returns all times with the X and Y values of each position.

        Parameters
        ----------

        tms : bool, optional
          If ``True``, the times will be returned in milliseconds.
        """
        return zip((1000 if tms else 1) * self.tt, self.xx, self.yy)
- -
[docs] def to_file(self, filename): - """Saves the trajectory data in a text file. - - Parameters - ---------- - - filename : str - Path to the location of the new trajectory text file. - """ - np.savetxt( - filename, - np.array(list(self.txy(tms=True))), - fmt="%d", - delimiter="\t", - )
- -
[docs] @staticmethod - def from_file(filename): - """Instantiates an object of Trajectory using a data text file. - - Parameters - ---------- - - filename : str - Path to the location of trajectory text file to load. - - - Returns - ------- - - Trajectory : new instance loaded from text file. - """ - arr = np.loadtxt(filename, delimiter="\t") - tt, xx, yy = arr.T - return Trajectory(1.0 * tt / 1000, xx, yy)
- -
[docs] @staticmethod - def save_list(trajs, filename): - """Saves a set of trajectories into a text file. - - Parameters - ---------- - - trajs : list - List of trajectories to be saved. - - filename : str - Path of the text file that will store the trajectories data. - """ - N = len(trajs) - arr = np.hstack([np.array(list(t.txy(tms=True))) for t in trajs]) - np.savetxt( - filename, - arr, - fmt="%d", - delimiter="\t", - header="\t".join(N * ["t(ms)", "x", "y"]), - )
- -
[docs] @staticmethod - def load_list(filename): - """Loads a list of trajectories from a data text file. - - Parameters - ---------- - - filename : str - Path of the text file that stores the data of a set of trajectories. - - - Returns - ------- - - list : List of trajectories loaded from the file. - """ - arr = np.loadtxt(filename, delimiter="\t").T - Nlines = arr.shape[0] - return [ - Trajectory(tt=1.0 * a[0] / 1000, xx=a[1], yy=a[2]) - for a in np.split(arr, Nlines / 3) - ]
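A small end-to-end sketch of ``Trajectory`` (times in seconds, positions in pixels; the file name is a placeholder):

.. code-block:: python

    from moviepy.video.tools.interpolators import Trajectory

    traj = Trajectory([0, 0.5, 1.0], [0, 50, 120], [10, 10, 30])
    print(traj(0.25))          # interpolated [x, y] at t = 0.25
    traj.to_file("traj.txt")   # times are stored in milliseconds
    traj2 = Trajectory.from_file("traj.txt")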
diff --git a/_modules/moviepy/video/tools/subtitles.html b/_modules/moviepy/video/tools/subtitles.html
deleted file mode 100644
index 08e1e1306..000000000
--- a/_modules/moviepy/video/tools/subtitles.html
+++ /dev/null
@@ -1,641 +0,0 @@

Source code for moviepy.video.tools.subtitles
-"""Experimental module for subtitles support."""
-
-import re
-
-import numpy as np
-
-from moviepy.decorators import convert_path_to_string
-from moviepy.tools import convert_to_seconds
-from moviepy.video.VideoClip import TextClip, VideoClip
-
-
-
[docs]class SubtitlesClip(VideoClip): - """A Clip that serves as "subtitle track" in videos. - - One particularity of this class is that the images of the - subtitle texts are not generated beforehand, but only if - needed. - - Parameters - ---------- - - subtitles - Either the name of a file as a string or path-like object, or a list - - font - Path to a font file to be used. Optional if make_textclip is provided. - - make_textclip - A custom function to use for text clip generation. If None, a TextClip - will be generated. - - The function must take a text as argument and return a VideoClip - to be used as caption - - encoding - Optional, specifies srt file encoding. - Any standard Python encoding is allowed (listed at - https://docs.python.org/3.8/library/codecs.html#standard-encodings) - - Examples - -------- - - >>> from moviepy.video.tools.subtitles import SubtitlesClip - >>> from moviepy.video.io.VideoFileClip import VideoFileClip - >>> generator = lambda text: TextClip(text, font='Georgia-Regular', - ... font_size=24, color='white') - >>> sub = SubtitlesClip("subtitles.srt", generator) - >>> sub = SubtitlesClip("subtitles.srt", generator, encoding='utf-8') - >>> myvideo = VideoFileClip("myvideo.avi") - >>> final = CompositeVideoClip([clip, subtitles]) - >>> final.write_videofile("final.mp4", fps=myvideo.fps) - - """ - - def __init__(self, subtitles, font=None, make_textclip=None, encoding=None): - VideoClip.__init__(self, has_constant_size=False) - - if not isinstance(subtitles, list): - # `subtitles` is a string or path-like object - subtitles = file_to_subtitles(subtitles, encoding=encoding) - - # subtitles = [(map(convert_to_seconds, times), text) - # for times, text in subtitles] - self.subtitles = subtitles - self.textclips = dict() - - self.font = font - - if make_textclip is None: - if self.font is None: - raise ValueError("Argument font is required if make_textclip is None.") - - def make_textclip(txt): - return TextClip( - font=self.font, - text=txt, - font_size=24, - color="#ffffff", - stroke_color="#000000", - stroke_width=0.5, - ) - - self.make_textclip = make_textclip - self.start = 0 - self.duration = max([tb for ((ta, tb), txt) in self.subtitles]) - self.end = self.duration - - def add_textclip_if_none(t): - """Will generate a textclip if it hasn't been generated asked - to generate it yet. If there is no subtitle to show at t, return - false. - """ - sub = [ - ((text_start, text_end), text) - for ((text_start, text_end), text) in self.textclips.keys() - if (text_start <= t < text_end) - ] - if not sub: - sub = [ - ((text_start, text_end), text) - for ((text_start, text_end), text) in self.subtitles - if (text_start <= t < text_end) - ] - if not sub: - return False - sub = sub[0] - if sub not in self.textclips.keys(): - self.textclips[sub] = self.make_textclip(sub[1]) - - return sub - - def make_frame(t): - sub = add_textclip_if_none(t) - return self.textclips[sub].get_frame(t) if sub else np.array([[[0, 0, 0]]]) - - def make_mask_frame(t): - sub = add_textclip_if_none(t) - return self.textclips[sub].mask.get_frame(t) if sub else np.array([[0]]) - - self.make_frame = make_frame - hasmask = bool(self.make_textclip("T").mask) - self.mask = VideoClip(make_mask_frame, is_mask=True) if hasmask else None - -
[docs] def in_subclip(self, start_time=None, end_time=None): - """Returns a sequence of [(t1,t2), text] covering all the given subclip - from start_time to end_time. The first and last times will be cropped so as - to be exactly start_time and end_time if possible. - """ - - def is_in_subclip(t1, t2): - try: - return (start_time <= t1 < end_time) or (start_time < t2 <= end_time) - except Exception: - return False - - def try_cropping(t1, t2): - try: - return max(t1, start_time), min(t2, end_time) - except Exception: - return t1, t2 - - return [ - (try_cropping(t1, t2), txt) - for ((t1, t2), txt) in self.subtitles - if is_in_subclip(t1, t2) - ]
- - def __iter__(self): - return iter(self.subtitles) - - def __getitem__(self, k): - return self.subtitles[k] - - def __str__(self): - def to_srt(sub_element): - (start_time, end_time), text = sub_element - formatted_start_time = convert_to_seconds(start_time) - formatted_end_time = convert_to_seconds(end_time) - return "%s - %s\n%s" % (formatted_start_time, formatted_end_time, text) - - return "\n\n".join(to_srt(sub) for sub in self.subtitles) - -
[docs] def match_expr(self, expr): - """Matches a regular expression against the subtitles of the clip.""" - return SubtitlesClip( - [sub for sub in self.subtitles if re.findall(expr, sub[1]) != []] - )
- -
[docs] def write_srt(self, filename): - """Writes an ``.srt`` file with the content of the clip.""" - with open(filename, "w+") as file: - file.write(str(self))
- - -
[docs]@convert_path_to_string("filename") -def file_to_subtitles(filename, encoding=None): - """Converts a srt file into subtitles. - - The returned list is of the form ``[((start_time,end_time),'some text'),...]`` - and can be fed to SubtitlesClip. - - Only works for '.srt' format for the moment. - """ - times_texts = [] - current_times = None - current_text = "" - with open(filename, "r", encoding=encoding) as file: - for line in file: - times = re.findall("([0-9]*:[0-9]*:[0-9]*,[0-9]*)", line) - if times: - current_times = [convert_to_seconds(t) for t in times] - elif line.strip() == "": - times_texts.append((current_times, current_text.strip("\n"))) - current_times, current_text = None, "" - elif current_times: - current_text += line - return times_texts
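The example in the ``SubtitlesClip`` docstring above mixes its variable names (``clip``/``subtitles`` vs. ``myvideo``/``sub``); here is a consistent minimal sketch, with placeholder paths and font:

.. code-block:: python

    from moviepy import CompositeVideoClip, TextClip, VideoFileClip
    from moviepy.video.tools.subtitles import SubtitlesClip

    def make_textclip(txt):
        return TextClip(font="./example.ttf", text=txt, font_size=24, color="#ffffff")

    subs = SubtitlesClip("subtitles.srt", make_textclip=make_textclip, encoding="utf-8")
    video = VideoFileClip("myvideo.avi")
    final = CompositeVideoClip([video, subs.with_position(("center", "bottom"))])
    final.write_videofile("final.mp4", fps=video.fps)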
diff --git a/_sources/developer_guide/contribution_guidelines.rst.txt b/_sources/developer_guide/contribution_guidelines.rst.txt
deleted file mode 100644
index 1206e1c1f..000000000
--- a/_sources/developer_guide/contribution_guidelines.rst.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-.. _contribution_guidelines:
-
-MoviePy's Contribution Guidelines
-=================================
-
-Communication on GitHub
------------------------
-
-- Keep messages on GitHub issues and pull requests on-topic and to the point. Be aware that each comment triggers a notification which gets sent out to a number of people.
-  - Opinions are OK.
-  - For longer or more in-depth discussions, use the `MoviePy Gitter `_. If these discussions lead to a decision, like a merge/reject, please leave a message on the relevant MoviePy issue to document the outcome of the discussion/the reason for the decision.
-- Do not push any commit that changes the API without prior discussion.
-
-Preparing for development
--------------------------
-
-- Fork the official MoviePy repository to your own GitHub account:
-  Use the "Fork" button in the top right corner of the GitHub interface while viewing `the official MoviePy `_ repository.
-- Use your fork as the basis for cloning the repository to your local machine: ``$ git clone URL_TO_YOUR_FORK``
-  You can get the appropriate URL (SSH- or HTTPS-based) by using the green "Code" button located at the top right of the repository view while looking at your fork. By default, Git refers to any remote you clone from – i.e. in this case your fork on GitHub – as ``origin``.
-- Enter your local clone and add the official MoviePy repository as a second remote, with alias ``upstream``:
-  ``$ git remote add upstream git@github.com:Zulko/moviepy.git`` (using SSH) *or*
-  ``$ git remote add upstream https://github.com/Zulko/moviepy.git`` (using HTTPS).
-- Install the library inside a `virtual environment `_ with all dependencies included, using ``$ pip install -e ".[optional,doc,test,lint]"``
-- Configure the pre-commit hooks by running ``$ pre-commit install``
-
-Coding conventions, code quality
---------------------------------
-
-- Respect `PEP8 `_ conventions.
-- Add just the "right" amount of comments. Try to write auto-documented code with very explicit variable names.
-- If you introduce new functionality or fix a bug, document it in the docstring or with code comments.
-- MoviePy's team adopted `pre-commit `_ to run code checks using black, flake8 and isort, so make sure that you've configured the pre-commit hooks with ``pre-commit install``.
-
-Standard contribution workflow
-------------------------------
-
-Local development
-~~~~~~~~~~~~~~~~~
-
-- Keep your local ``master`` branch up-to-date with the official repo's master by periodically fetching/pulling it:
-  ``$ git pull upstream master``
-- Never make changes on ``master`` directly, but branch off into separate develop branches:
-  ``$ git checkout -b YOUR_DEVELOP_BRANCH``
-  Ideally, these are given names which function as keywords for what you are working on, and are prefixed with ``fix_`` (for bug fixes), ``feature_`` or something similarly appropriate and descriptive.
-- Base any changes you submit on the most recent ``master``.
-
-More detailed explanation of the last point:
-
-It is likely that the official repo's ``master`` branch will move on (get updated, have other PRs merged into it) while you are working on your changes.
-Before creating a pull request, you will have to make sure your changes are not based on outdated code. For this reason, it makes sense to avoid falling "too much behind" while developing by rebasing your local ``master`` branch at intervals. Make sure your ``master`` branch is in sync with the official ``master`` branch (as per the first point), then, while checked into your develop branch, run: ``$ git rebase master``
-
-If you **haven't rebased before**, make sure to **familiarise yourself** with the concept.
-
-Submitting Pull Requests
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-You do not have to have finished your feature or bug fix before submitting a PR; just mention that it is still a work in progress.
-
-Before submitting PRs:
-
-- run the test suite over your code to expose any problems: ``$ pytest``
-- push your local develop branch to your GitHub fork: ``$ git push origin YOUR_DEVELOP_BRANCH``
-
-When you now look at your forked repo on your GitHub account, you will see GitHub suggest branches for sending pull requests to the official ``Zulko/moviepy`` repository.
-
-Once you open a PR, you will be presented with a template which you are asked to fill out. You are encouraged to add any additional information which helps provide further context to your changes, and to link to any issues or PRs which your pull request references or is informed by.
-
-On submitting your PR, an automated test suite runs over your submission, which might take a few minutes to complete. As a next step, a MoviePy maintainer will review your code and, if necessary, help you to get it merge-ready.
diff --git a/_sources/developer_guide/developers_install.rst.txt b/_sources/developer_guide/developers_install.rst.txt
deleted file mode 100644
index d16d88cb4..000000000
--- a/_sources/developer_guide/developers_install.rst.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-.. _developers_install:
-
-Installation for MoviePy developers
-===================================
-
-.. warning::
-    This part is only intended for people who want to build the MoviePy documentation themselves or contribute to MoviePy; normal users don't need it.
-
-In addition to MoviePy's main libraries, MoviePy developers will also need to install additional libraries to be able to run the MoviePy tests and build the MoviePy documentation.
-
-Libraries for documentation
----------------------------
-
-You can install the libraries required to build the documentation with:
-
-.. code:: bash
-
-    $ (sudo) pip install moviepy[doc]
-
-Once the libraries are installed, you can build the documentation with:
-
-.. code:: bash
-
-    $ python setup.py build_docs
-
-
-Libraries for testing and linting
----------------------------------
-
-You can install the libraries required for testing and linting with:
-
-.. code:: bash
-
-    $ (sudo) pip install moviepy[test]
-    $ (sudo) pip install moviepy[lint]
-
-Once the libraries are installed, you can run the tests with:
-
-.. code:: bash
-
-    $ python -m pytest
-
-And you can lint with:
-
-.. code:: bash
-
-    $ python -m black .
diff --git a/_sources/developer_guide/index.rst.txt b/_sources/developer_guide/index.rst.txt
deleted file mode 100644
index 774c2e448..000000000
--- a/_sources/developer_guide/index.rst.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-.. _developer_guide:
-
-
-The MoviePy Developers Guide
-----------------------------
-
-The Developers Guide covers most of the things people wanting to participate in MoviePy development need to know.
-
-.. toctree::
-   :maxdepth: 1
-
-   developers_install
-   contribution_guidelines
diff --git a/_sources/getting_started/index.rst.txt b/_sources/getting_started/index.rst.txt
deleted file mode 100644
index 21b8aaad1..000000000
--- a/_sources/getting_started/index.rst.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-.. _getting_started:
-
-Getting started with MoviePy
-----------------------------
-
-This section explains everything you need to start editing with MoviePy. To go further, have a look at the :ref:`user_guide` and the :ref:`reference_manual`.
-
-
-.. toctree::
-   :maxdepth: 1
-
-   install
-   quick_presentation
-   moviepy_10_minutes
-   docker
-   updating_to_v2
-   FAQ
diff --git a/_sources/getting_started/install.rst.txt b/_sources/getting_started/install.rst.txt
deleted file mode 100644
index d4b03d31a..000000000
--- a/_sources/getting_started/install.rst.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-.. _install:
-
-Installation
-============
-
-Installation is done with ``pip``; if you don't have ``pip``, take a look at `how to install it `_.
-
-With ``pip`` installed, just type this in a terminal:
-
-.. code:: bash
-
-    $ (sudo) pip install moviepy
-
-
-.. _install#binaries:
-
-Installation of additional binaries
------------------------------------
-
-MoviePy depends on the software ffmpeg_ for video reading and writing, and on ``ffplay`` for video previewing.
-
-You don't need to worry about ffmpeg_, as it should be automatically downloaded/installed by ImageIO during your first use of MoviePy (it takes a few seconds).
-
-You do need to worry about ``ffplay`` if you plan on using video/audio previewing, though. In that case, make sure to have ``ffplay`` installed (it can usually be found alongside ``ffmpeg``) and
-make sure it is accessible to Python, or look at how to set a custom path (see below).
-
-
-Define custom paths to binaries
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you want to use a specific version of FFMPEG and FFPLAY, you can do so using environment variables.
-
-There are a couple of environment variables used by MoviePy that allow you to configure custom paths to the external tools.
-
-To set up any of these variables, the easiest way is to do it in Python before importing objects from MoviePy. For example:
-
-.. code-block:: python
-
-    import os
-    os.environ["FFMPEG_BINARY"] = "/path/to/custom/ffmpeg"
-    os.environ["FFPLAY_BINARY"] = "/path/to/custom/ffplay"
-
-
-Alternatively, after installing the optional dependencies, you can create
-a ``.env`` file in your working directory that will be automatically read.
-For example:
-
-.. code-block:: ini
-
-    FFMPEG_BINARY=/path/to/custom/ffmpeg
-    FFPLAY_BINARY=/path/to/custom/ffplay
-
-
-Environment variables
-"""""""""""""""""""""
-
-There are 2 available environment variables for external binaries:
-
-``FFMPEG_BINARY``
-    Normally you can leave it at its default (``'ffmpeg-imageio'``), in which
-    case imageio will download the right ffmpeg binary (on first use) and then always use that binary.
-
-    The second option is ``"auto-detect"``. In this case ffmpeg will be whatever
-    binary is found on the computer: generally ``ffmpeg`` (on Linux/macOS) or ``ffmpeg.exe`` (on Windows).
-
-    Lastly, you can set it to use a binary at a specific location on your disk by specifying the exact path.
-
-
-``FFPLAY_BINARY``
-    The default is ``"auto-detect"``. MoviePy will try to find and use the installed ``ffplay`` binary.
-
-    You can set it to use a binary at a specific location on your disk.
On Windows, this might look like:: - - os.environ["FFPLAY_BINARY"] = r"C:\Program Files\ffmpeg\ffplay.exe" - - -Verify if MoviePy find binaries -"""""""""""""""""""""""""""""""" -To test if FFmpeg and FFplay are found by MoviePy, in a Python console, you can run : - -.. code-block:: python - - >>> from moviepy.config import check - >>> check() - - -.. _ffmpeg: https://www.ffmpeg.org/download.html - diff --git a/_sources/getting_started/moviepy_10_minutes.rst.txt b/_sources/getting_started/moviepy_10_minutes.rst.txt deleted file mode 100644 index 46298b784..000000000 --- a/_sources/getting_started/moviepy_10_minutes.rst.txt +++ /dev/null @@ -1,264 +0,0 @@ -.. _moviepy_10_minutes: - -MoviePy in 10 Minutes: Creating a Trailer from "Big Buck Bunny" -=============================================================== - -.. note:: - This tutorial aims to be a simple and short introduction for new users wishing to use MoviePy. For a more in-depth exploration of the concepts seen in this tutorial, see :ref:`user_guide`. - -In this tutorial, you will learn the basics of how to use the MoviePy library in just 10 minutes. As an example project for this tutorial, we will create the following trailer for the movie `"Big Buck Bunny." `_. - -.. raw:: html - -
- - -Prerequisites ------------- - -Before we start, make sure you have MoviePy installed. You can install it using pip: - -.. code-block:: shell - - pip install moviepy - - -Also, we will need to gather a few resources such as the original movie, font files, images, etc. -To make it easy, we have prepared a template project you can download directly: - -1. Download :download:`the project template ` and unzip it. -2. Take a look at the resources inside the folder to familiarize yourself. -3. Create a Python script file named ``trailer.py`` in the project directory. - -Now, you are ready to proceed to the next steps. - -Step 1: Import MoviePy and Load the Video ----------------------------------------- - -Let's start by importing the necessary modules and loading the "Big Buck Bunny" video into our Python program: - -.. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 0-10 - -As you can see, loading a video file is really easy, but MoviePy isn't limited to video. It can handle images, audio, text, and even custom animations. - -No matter the kind of resource, ultimately any clip will be either a :py:class:`~moviepy.video.VideoClip.VideoClip` for any visual element, or an :py:class:`~moviepy.audio.AudioClip.AudioClip` for any audio element. - -In this tutorial, we will only see a few of those, but if you want to explore more, you can find an exhaustive list in the user guide about :ref:`loading`. - -Step 2: Extract the Best Scenes ------------------------------- - -To create our trailer, we will focus on presenting the main characters, so we need to extract parts of the movie. -This is a very common task, so let's turn our main clip into multiple subclips: - -.. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 13-25 - - -Here, we use the ``with_subclip`` method to extract specific scenes from the main video. We provide the start and end times (in seconds or as text with the format ``HH:MM:SS.µS``) for each scene. -The extracted clips are stored in their respective variables (``intro_clip``, ``bird_clip``, etc.). - -Step 3: Take a First Look with Preview -------------------------------------- - -When editing videos, it's often essential to preview the clips to ensure they meet our vision. This allows you to watch the segment you're working on and make any necessary adjustments for the perfect result. - -To do so using MoviePy, you can use the ``preview()`` function available for each clip (the complementary ``audio_preview()`` is also available for :py:class:`~moviepy.audio.AudioClip.AudioClip`). - -.. note:: - Note that you will need ``ffplay`` installed and accessible to MoviePy for previewing to work. You can check if ``ffplay`` is available by running the command ``python3 -c "from moviepy.config import check;check()"``. - If not, please see :ref:`install#binaries`. - -.. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 28-38 - -By using the preview, you may have noticed that our clips not only contain video but also audio. This is because when loading a video, you not only load the image but also the audio tracks, which are turned into :py:class:`~moviepy.audio.AudioClip.AudioClip` objects and -added to your video clip. - -.. note:: - When previewing, you may encounter video slowing or video/audio shifting. This is not a bug; it's due to the fact that your computer cannot render the preview in real-time.
- In such a case, the best course of action is to set the ``fps`` parameter of ``preview()`` to a lower value to make things easier on your machine. - - -Step 4: Modify a Clip by Cutting Out a Part of It -------------------------------------------------- - -After previewing the clips, we notice that the rodents' scene is a bit long. Let's modify the clip by removing the parts of the scene that we don't need. This is quite a common task in video editing. -To do so, we are going to use the ``with_cutout`` method to remove the portion of the clip between ``00:06:00`` and ``00:10:00``. - -.. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 41-54 - -In this particular case, we have used ``with_cutout``, but it is only one of the many clip manipulation methods starting with ``with_*``. We will see a few others -in this tutorial, but many more exist. If you want an exhaustive list, see :ref:`reference_manual`. - -.. note:: - You may have noticed that we have reassigned the ``rodents_clip`` variable instead of just calling a method on it. - This is because in MoviePy, any function starting with ``with_*`` is out-of-place instead of in-place, meaning it does not modify the original data but instead copies it and modifies/returns the copy. - So you need to store the result of the method and, if necessary, reassign the original variable to update your clip. - - -Step 5: Creating Text/Logo Clips ------------------------------------ - -In addition to videos, we often need to work with images and text. MoviePy offers some specialized kinds of :py:class:`~moviepy.video.VideoClip.VideoClip` specifically for that purpose: ``ImageClip`` and ``TextClip``. - -In our case, we want to create text clips to add text overlays between the video clips. We'll define the font, text content, font size, and color for each text clip. -We also want to create image clips for the "Big Buck Bunny" logo and the "Made with MoviePy" logo and resize them as needed. - -.. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 56-82 - -As you can see, ``ImageClip`` is quite simple, but ``TextClip`` is a rather complicated object. Don't hesitate to explore the arguments it accepts. - -.. note:: - In our example, we have used the ``resized()`` method to resize our image clips. This method works just like any ``with_*`` method, but because resizing is such a common - task, the name has been shortened to ``resized()``. The same is true for ``cropped()`` and ``rotated()``. - -Feel free to experiment with different effects and transitions to achieve the desired trailer effect. - - -Step 6: Timing the clips -------------------------- - -We have all the clips we need, but if we were to combine all those clips into a single one using composition (we will see that in the next step), all our clips would start at the same time and play on top of each other, which is obviously not what we want. -Also, some video clips, like the images and texts, have no endpoint/duration at creation (unless you have provided a duration parameter), which means that trying to render them will throw an error, as it would result in an infinite video. - -To fix that, we need to specify when a clip should start and stop in the final clip. So, let's start by indicating when each clip must start and end with the appropriate ``with_*`` methods, as sketched below.
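To give the idea, here is a minimal sketch of clip timing (the clip variables below are placeholders for illustration, not the ones from the trailer script):

.. code-block:: python

    # Hypothetical clips, only to illustrate the timing methods
    logo_clip = logo_clip.with_start(0).with_duration(3)  # visible from t=0s to t=3s
    title_clip = title_clip.with_start(3).with_end(6)     # visible from t=3s to t=6s

And here is how the timing looks in our actual trailer script:

.. 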
literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 85-111 - -.. note:: - By default, all clips have a start point at ``0``. If a clip has no duration but you set the ``endtime``, then the duration will be calculated for you. The reverse is also true. - - So in our case, we either use duration or endtime, depending on what is more practical for each specific case. - -Step 7: Seeing how all clips combine -------------------------------------- - -Now that all our clips are timed, let's get a first idea of how our final clip will look. In video editing, the act of assembling multiple videos into a single one is known as composition. -So, MoviePy offers a special kind of :py:class:`~moviepy.video.VideoClip.VideoClip` dedicated to the act of combining multiple clips into one, the :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip`. - -:py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip` takes an array of clips as input and will play them on top of each other at render time, starting and stopping each clip at its start and end points. - -.. note:: - If possible, :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip` will extract its endpoint and size from the biggest/last-ending clip. If a clip in the list has no duration, then you will have to manually set the duration of the :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip` before rendering. - -.. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 114-136 - - -Step 8: Positioning our clips ------------------------------- - -Looking at this first preview, we see that our clips are pretty well timed, but that the positions of our texts and logos are not satisfactory. - -This is because, for now, we have only specified when our clips should appear, and not the position at which they should appear. By default, all clips are positioned from the top left of the video, at ``(0, 0)``. - -Our clips do not all have the same size (the texts and images are smaller than the videos), and the :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip` takes the size of the biggest clip (so in our case, the size of the videos), -so the texts and images all end up in the top-left portion of the clip. - -To fix this, we simply have to define the position of our clips in the composition with the method ``with_position``. - -.. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 139-174 - -.. note:: - The position is a tuple with the horizontal and vertical positions. You can give them as pixels, as strings (``top``, ``left``, ``right``, ``bottom``, ``center``), and even as percentages by providing - a float and passing the argument ``relative=True``. - -Now, all our clips are in the right place and timed as expected. - - -Step 9: Adding transitions and effects -------------------------------------------- - -So, our clips are timed and placed, but for now, the result is quite raw. It would be nice to have smoother transitions between the clips. -In MoviePy, this is achieved through the use of effects. - -Effects play a crucial role in enhancing the visual and auditory appeal of your video clips. Effects are applied to clips to create transitions, transformations, or modifications, resulting in better-looking videos.
-Whether you want to add smooth transitions between clips, alter the visual appearance, or manipulate audio properties, MoviePy comes with many existing effects to help you bring your creative vision to life with ease. - -You can find these effects under the namespace ``vfx`` for video effects and ``afx`` for audio effects. - -.. note:: - You can use audio effects on both audio and video clips, because when applying audio effects to a video clip, the effect will actually be applied to the video clip's embedded audio clip instead. - -Using an effect is very simple. You just have to call the method ``with_effects`` on your clip and pass an array of effect objects to apply. - -In our case, we will add simple fade-in/out and cross-fade-in/out transitions between our clips, as well as slowing down the ``rambo_clip``. - -.. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 177-239 - -Well, this looks a lot nicer! For this tutorial, we want to keep things simple, so we mostly used transitions. However, you can find many different effects and even create your own. -For a more in-depth presentation, see :py:mod:`moviepy.video.fx`, :py:mod:`moviepy.audio.fx`, and :ref:`create_effects`. - -.. note:: - Looking at the result, you may notice that crossfading makes clips go from transparent to opaque, and back again, and wonder how it works. - - We won't get into details, but know that in MoviePy, you can declare some sections of a video clip to be transparent by using masks. Masks are nothing more than - special kinds of video clips that are made of values ranging from ``0`` for a transparent pixel to ``1`` for a fully opaque one. - - For more info, see :ref:`loading#masks`. - - -Step 10: Modifying the appearance of a clip using filters --------------------------------------------------------------- - -Finally, to make it more epic, we will apply a custom filter to our Rambo clip to make the image sepia. -MoviePy does not come with a sepia effect out of the box, and creating a full custom effect is beyond the scope of this tutorial. However, we will see how we can apply a simple filter to our clip using the ``image_transform`` method. - -To understand how filters work, you first need to understand that in MoviePy, a clip frame is nothing more than a numpy ``ndarray`` of shape ``HxWx3``. -This means we can modify how a frame looks by applying simple math operations. Doing that on all the frames allows us to apply a filter to our clip! - -The "apply to all frames" part is done by the ``image_transform`` method. This method takes a callback function as an argument, and at render time, it will trigger the callback for each frame of the clip, passing the current frame. - -.. warning:: - This is a somewhat advanced usage, and the example involves matrix multiplication. If this is too much for you, you can simply ignore it until you really need to make custom filters, - then go look for a more detailed explanation of how to do filtering (:ref:`modifying#filters`) and how to create custom effects (:ref:`create_effects`) in the user guide. - - What you need to remember is just that we can apply filters on images. Here we do it mathematically, but you could very well use a library such as Pillow (provided it can understand numpy images) to do the maths for you!
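As a minimal sketch of the mechanism (much simpler than sepia, and assuming the ``rambo_clip`` variable from the previous steps), a filter that inverts colors would look like this:

.. code-block:: python

    def invert_colors(frame):
        # frame is an HxWx3 numpy ndarray of uint8 values in [0, 255]
        return 255 - frame

    # image_transform calls the function on every frame at render time
    rambo_clip = rambo_clip.image_transform(invert_colors)

The actual sepia filter of our trailer is a bit more involved:

.. 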
literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 242-283 - - -Step 11: Rendering the final clip to a file -------------------------------------------- - -So, our final clip is ready, and we have made all the cuts and modifications we want. We are now ready to save the final result into a file. In video editing, this operation -is known as rendering. - -Again, we will keep things simple and just do video rendering without much tweaking. In most cases, MoviePy and FFMPEG automatically find the best settings. Take a look at the ``write_videofile`` documentation for more info. - -.. literalinclude:: /_static/code/getting_started/moviepy_10_minutes/trailer.py - :language: python - :lines: 286-307 - - -Conclusion ----------- - -Congratulations! You have successfully created a trailer for the movie "Big Buck Bunny" using the MoviePy library. This tutorial covered the basics of MoviePy, including loading videos, trimming scenes, adding effects and transitions, overlaying text, and even a little bit of filtering. - -If you want to dig deeper into MoviePy, we encourage you to experiment with this base example by using different effects, transitions, and audio elements to make your trailer truly captivating. -We also encourage you to read the :ref:`user_guide`, as well as to look directly at the :ref:`reference_manual`. diff --git a/_sources/getting_started/quick_presentation.rst.txt b/_sources/getting_started/quick_presentation.rst.txt deleted file mode 100644 index da68f00e2..000000000 --- a/_sources/getting_started/quick_presentation.rst.txt +++ /dev/null @@ -1,80 +0,0 @@ -.. _quick_presentation: - -Quick presentation =================== - -This section explains when MoviePy can be used and how it works. - -Do I need MoviePy? ~~~~~~~~~~~~~~~~~~~ - -Here are a few reasons why you may want to edit videos in Python: - -- You have many videos to process or to compose in a complicated way. -- You want to automate the creation of videos or GIFs on a web server (Django, Flask, etc.) -- You want to automate tedious tasks, like title insertions, tracking objects, cutting scenes, making end credits, subtitles, etc. -- You want to code your own video effects to do something no existing video editor can. -- You want to create animations from images generated by another Python library (Matplotlib, Mayavi, Gizeh, scikit-image...) - -And here are a few uses for which MoviePy is NOT the best solution: - -- You only need to do frame-by-frame video analysis (with face detection or other fancy stuff). This could be done with MoviePy in association with other libraries, but really, just use imageio_, OpenCV_ or SimpleCV; these are libraries that specialize in such tasks. -- You only want to convert a video file, or turn a series of image files into a movie. In this case it is better to directly call ``ffmpeg`` (or ``avconv`` or ``mencoder``...), as it will be faster and more memory-efficient than going through MoviePy. - - -Advantages and limitations ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -MoviePy has been developed with the following goals in mind: - -- **Simple and intuitive**. Basic operations can be done in one line. The code is easy to learn and easy to understand for newcomers. -- **Flexible**. You have total control over the frames of the video and audio, and creating your own effects is easy as Py. -- **Portable**. The code uses very common software (Numpy and FFMPEG) and can run on (almost) any machine with (almost) any version of Python.
- -For the limitations: MoviePy cannot (yet) stream videos (read from a webcam, or render a video live on a distant machine), and is not really designed for video processing involving many successive frames of a movie (like video stabilization; you'll need other software for that). You can also run into memory problems if you use many video, audio, and image sources at the same time (>100), but this will be fixed in future versions. - -Example code ~~~~~~~~~~~~~~ - -In a typical MoviePy script, you load video or audio files, modify them, put them together, and write the final result to a new video file. As an example, let us load a video, lower the volume, add a title in the center of the video for the first ten seconds, and write the result to a file: - -.. literalinclude:: /_static/code/getting_started/quick_presentation/basic_example.py - :language: python - - -How MoviePy works ~~~~~~~~~~~~~~~~~~~ - -MoviePy uses the software ``ffmpeg`` to read and export video and audio files. It also (optionally) uses ``ffplay`` to allow for video previewing. - -Internally, the representation and manipulation of the different media is done using Python's fast numerical library Numpy. Advanced effects and enhancements also use the ``pillow`` library. - -.. image:: /_static/medias/getting_started/explanations.jpeg - :width: 570px - :align: center - - -The central concept, the clips ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The central objects of MoviePy are the clips, with either :py:class:`~moviepy.audio.AudioClip.AudioClip` for any audio element, or :py:class:`~moviepy.video.VideoClip.VideoClip` for any visual element. Clips really are the base unit of MoviePy; everything you do is with and on them. - -Clips can be created from more than just videos or audio, though. They can also be created from an image, a text, a custom animation, a folder of images, and even a simple lambda function! - -To create your final video, what you will do is essentially: - -#. Load different resources as clips (see :ref:`loading`) -#. Modify them (see :ref:`modifying`) -#. Mix them into one final clip (see :ref:`compositing`) -#. Render them into a file (see :ref:`rendering`) - -Of course, MoviePy offers multiple handy solutions and tools to facilitate all those steps, and lets you add new ones by writing your own effects (see :ref:`create_effects`)! - - -.. _imageio: https://imageio.github.io/ -.. _OpenCV: http://opencv.org/ - - - - - diff --git a/_sources/getting_started/updating_to_v2.rst.txt b/_sources/getting_started/updating_to_v2.rst.txt deleted file mode 100644 index 92db38411..000000000 --- a/_sources/getting_started/updating_to_v2.rst.txt +++ /dev/null @@ -1,117 +0,0 @@ -.. _updating_to_v2: - -Updating from v1.X to v2.X ========================== - -MoviePy v2.0 has undergone some large changes with the aim of making the API more consistent -and intuitive. In order to do so, multiple breaking changes have been made. -Therefore, there is a high likelihood that your pre-v2.0 programs will not run without -some changes. - -Dropping support of Python 2 ----------------------------- -Starting with version 2.0, MoviePy **no longer supports Python 2**, which makes sense since Python 2 reached its end of life over three years ago. -Focusing on Python 3.7+ allows MoviePy to take advantage of the latest language features and improvements while maintaining code quality and security. - -Users are encouraged to upgrade to a supported version of Python to continue using MoviePy.
- -``moviepy.editor`` suppression and simplified importation --------------------------------------------------------- -Before v2.0, it was advised to import from ``moviepy.editor`` whenever you needed to do some sort of manual operation, -such as previewing or hand editing, because the ``editor`` package was in charge of a lot of magic and initialization, making your life -easier, at the cost of initializing some complex modules like ``pygame``. - -With version 2.0, the ``moviepy.editor`` namespace simply no longer exists. You simply import everything from ``moviepy`` like this:: - - from moviepy import * # Simple and nice, the __all__ is set in moviepy so only useful things will be loaded - from moviepy import VideoFileClip # You can also import only the things you really need - - -Renaming and API unification ------------------------------- - -One of the most significant changes has been renaming all ``.set_`` methods to ``.with_``. More generally, almost all the methods modifying a clip now start -with ``with_``, indicating that they work 'outplace', meaning they do not directly modify the clip, but instead copy it, modify this copy, and return the updated copy, -leaving the original clip untouched. - -We advise you to check your code for any method calls on ``Clip`` objects and look for a matching ``.with_`` equivalent. - - -Massive refactoring of effects ------------------------------- - -With version 2.0, effects have undergone massive changes and refactoring. Though the logic of why and when to apply effects remains globally the same, -the implementation has changed quite heavily. - -If you used any kind of effects, you will have to update your code! - -Moving effects from functions to classes """""""""""""""""""""""""""""""""""""""""""""" - -MoviePy version 2.0 introduces a more structured and object-oriented approach to handling effects. In previous versions, effects were simply Python functions that manipulated video clips or images. -However, in version 2.0 and onwards, effects are represented as classes. - -This shift allows for better organization, encapsulation, and reusability of code, as well as more comprehensible code. Each effect is now encapsulated within its own class, making it easier to manage and modify. - -All effects now implement the :py:class:`~moviepy.Effect.Effect` abstract class. - -If you ever wrote your own effect, you will have to migrate it to the new object implementation. For more info, see :ref:`create_effects`. - -Moving from ``clip.fx`` to :py:meth:`~moviepy.Clip.Clip.with_effects` """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" - -Moving from functions to objects also meant MoviePy had to drop the method ``Clip.fx`` previously used to apply effects in favor of the new :py:meth:`~moviepy.Clip.Clip.with_effects`. - -For more info about how to use effects with v2.0, see :ref:`modifying#effects`. - -Removing effects as clip methods """""""""""""""""""""""""""""""""" - -Before version 2.0, when importing from ``moviepy.editor``, the effects were added as clip class methods at runtime. This is no longer the case. - -If you previously used effects by calling them as clip methods, you must now use :py:meth:`~moviepy.Clip.Clip.with_effects`, as sketched below.
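For example, here is a short before/after sketch of the migration (the v2 effect classes ``vfx.FadeIn``/``vfx.FadeOut`` correspond to the old ``fadein``/``fadeout`` functions; ``example.mp4`` is a placeholder file name, not part of the official docs):

.. code-block:: python

    from moviepy import VideoFileClip, vfx

    clip = VideoFileClip("example.mp4")  # placeholder input file

    # v1.X style, no longer works:
    # clip = clip.fadein(1).fadeout(1)
    # clip = clip.fx(vfx.fadein, 1).fx(vfx.fadeout, 1)

    # v2.X equivalent, using effect classes and with_effects:
    clip = clip.with_effects([vfx.FadeIn(1), vfx.FadeOut(1)])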
- -Dropping many external dependencies and unifying the environment ------------------------------------------------------------- - -With v1.0, MoviePy relied on many optional external dependencies, trying to gracefully fall back from one library to another in the event one of them was missing, eventually dropping some features when no library was available. -This resulted in complex and hard-to-maintain code for the MoviePy team, as well as a fragmented and hard-to-understand environment for the users. - -With v2.0, the MoviePy team tried to offer a simpler, smaller, and more unified dependency list, focusing on ``pillow`` for all complex image manipulation, and dropping altogether the usage of ``ImageMagick``, ``PyGame``, ``OpenCV``, ``scipy``, ``scikit``, and a few others. - -Removed features ----------------- - -Sadly, reducing the scope of MoviePy and limiting the external libraries means that some features had to be removed. If you used any of the following features, you will have to create your own replacement: - -- ``moviepy.video.tools.tracking`` -- ``moviepy.video.tools.segmenting`` -- ``moviepy.video.io.sliders`` - -Miscellaneous signature changes ------------------------------- - -When updating the API and moving from previous libraries to ``pillow``, some miscellaneous changes also happened, meaning some method signatures may have changed. - -You should check the new signatures if you used any of the following: - -- ``TextClip``: some argument names have changed, and a path to a font file is now needed at object instantiation -- ``clip.resize`` is now ``clip.resized`` -- ``clip.crop`` is now ``clip.cropped`` -- ``clip.rotate`` is now ``clip.rotated`` -- Any previous ``Clip`` method not starting with ``with_`` now probably starts with it - - -Why all these changes when updating from v1.0 to v2.0? ------------------------------------------------------- - -You may ask yourself why all these changes were introduced. The answer is: time. - -MoviePy has seen many evolutions since its first release and has become quite a complex project, with ambitions sometimes too large for the manpower available on the development team. -Over time, as in any project, inconsistencies were introduced in order to support new functionalities without breaking the current API, and some initial choices no longer reflected the current state of things. - -Due to multiple factors, MoviePy also underwent a long period of time during which the main version distributed through PyPI diverged from the GitHub-distributed version, introducing confusion and chaos. - -In a global effort to simplify future development and limit confusion by providing a unified environment, it was decided to release a new major version including the many evolutions that happened over the years, which meant introducing breaking changes. - -For those interested in how and why all of these things were decided, you can find a lot of the discussion that went into this in GitHub issues `#1874 `_, `#1089 `_ and `#2012 `_. \ No newline at end of file diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt deleted file mode 100644 index cc2ca3745..000000000 --- a/_sources/index.rst.txt +++ /dev/null @@ -1,141 +0,0 @@ -:notoc: - -*********************** -MoviePy documentation -*********************** - -..
image:: /_static/medias/logo.png - :width: 50% - :align: center - -**Date**: |today| **Version**: |version| - -**Useful links**: -`Binary Installers `__ | -`Source Repository `__ | -`Issues & Ideas `__ | -`Q&A Support `__ | - -MoviePy is the `Python `__ reference tool for video editing automation! - -It's an open source, MIT-licensed library offering user-friendly video editing -and manipulation tools for the `Python `__ programming language. - -.. grid:: 1 2 2 2 - :gutter: 4 - :padding: 2 2 0 0 - :class-container: sd-text-center - - .. grid-item-card:: Getting started - :img-top: _static/medias/index_getting_started.svg - :class-card: intro-card - :shadow: md - - New to *MoviePy*? Check out the getting started guides. They contain instructions - to install *MoviePy* as well as introductory concepts and tutorials. - - +++ - - .. button-ref:: getting_started - :ref-type: ref - :click-parent: - :color: secondary - :expand: - - To the starting guide - - .. grid-item-card:: User guide - :img-top: _static/medias/index_user_guide.svg - :class-card: intro-card - :shadow: md - - The user guide provides in-depth information on the - key concepts of *MoviePy* with useful background information and explanations. - - +++ - - .. button-ref:: user_guide - :ref-type: ref - :click-parent: - :color: secondary - :expand: - - To the user guide - - .. grid-item-card:: API reference - :img-top: _static/medias/index_api.svg - :class-card: intro-card - :shadow: md - - The reference guide contains a detailed description of - the *MoviePy* API. The reference describes how the methods work and which parameters can - be used. It assumes that you have an understanding of the key concepts. - - +++ - - .. button-ref:: reference_manual - :ref-type: ref - :click-parent: - :color: secondary - :expand: - - To the reference guide - - .. grid-item-card:: Developer guide - :img-top: _static/medias/index_contribute.svg - :class-card: intro-card - :shadow: md - - Saw a typo in the documentation? Want to improve - existing functionalities? The contributing guidelines will guide - you through the process of improving MoviePy. - - +++ - - .. button-ref:: developer_guide - :ref-type: ref - :click-parent: - :color: secondary - :expand: - - To the development guide - - - - -Contribute! --------------- - -MoviePy is open source software originally written by Zulko_ and released under the MIT licence. It works on Windows, Mac, and Linux. - -.. raw:: html - - - - - - - -.. toctree:: - :maxdepth: 3 - :hidden: - :titlesonly: - - - getting_started/index - user_guide/index - reference/index - developer_guide/index - - -.. _PyPI: https://pypi.python.org/pypi/moviepy -.. _Zulko: https://github.com/Zulko/ -.. _Stackoverflow: https://stackoverflow.com/ -.. _Github: https://github.com/Zulko/moviepy -.. _Reddit: https://www.reddit.com/r/moviepy/ \ No newline at end of file diff --git a/_sources/reference/index.rst.txt b/_sources/reference/index.rst.txt deleted file mode 100644 index e81677c5c..000000000 --- a/_sources/reference/index.rst.txt +++ /dev/null @@ -1,18 +0,0 @@ -.. _reference_manual: - - -API Reference ================ - -This is the definitive place to find all the details of the MoviePy API. - -For a more beginner-friendly introduction, please see :ref:`getting_started`; for more detailed explanations of the different concepts in MoviePy, -see :ref:`user_guide`. - -..
autosummary:: - :toctree: reference - :recursive: - :template: custom_autosummary/module.rst - - moviepy - diff --git a/_sources/reference/reference/moviepy.Clip.Clip.rst.txt b/_sources/reference/reference/moviepy.Clip.Clip.rst.txt deleted file mode 100644 index 28def5876..000000000 --- a/_sources/reference/reference/moviepy.Clip.Clip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.Clip.Clip -================= - -.. currentmodule:: moviepy.Clip - -.. autoclass:: Clip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.Clip.rst.txt b/_sources/reference/reference/moviepy.Clip.rst.txt deleted file mode 100644 index 0c60c9ec1..000000000 --- a/_sources/reference/reference/moviepy.Clip.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.Clip -============ - - -.. automodule:: moviepy.Clip - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - Clip - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.Effect.rst.txt b/_sources/reference/reference/moviepy.Effect.rst.txt deleted file mode 100644 index da6a6eb69..000000000 --- a/_sources/reference/reference/moviepy.Effect.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.Effect -============== - - -.. automodule:: moviepy.Effect - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.AudioClip.AudioArrayClip.rst.txt b/_sources/reference/reference/moviepy.audio.AudioClip.AudioArrayClip.rst.txt deleted file mode 100644 index a77a0b762..000000000 --- a/_sources/reference/reference/moviepy.audio.AudioClip.AudioArrayClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.AudioClip.AudioArrayClip -====================================== - -.. currentmodule:: moviepy.audio.AudioClip - -.. autoclass:: AudioArrayClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.AudioClip.AudioClip.rst.txt b/_sources/reference/reference/moviepy.audio.AudioClip.AudioClip.rst.txt deleted file mode 100644 index 8e12eafcd..000000000 --- a/_sources/reference/reference/moviepy.audio.AudioClip.AudioClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.AudioClip.AudioClip -================================= - -.. currentmodule:: moviepy.audio.AudioClip - -.. autoclass:: AudioClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.AudioClip.CompositeAudioClip.rst.txt b/_sources/reference/reference/moviepy.audio.AudioClip.CompositeAudioClip.rst.txt deleted file mode 100644 index 434e04586..000000000 --- a/_sources/reference/reference/moviepy.audio.AudioClip.CompositeAudioClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.AudioClip.CompositeAudioClip -========================================== - -.. 
currentmodule:: moviepy.audio.AudioClip - -.. autoclass:: CompositeAudioClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.AudioClip.concatenate_audioclips.rst.txt b/_sources/reference/reference/moviepy.audio.AudioClip.concatenate_audioclips.rst.txt deleted file mode 100644 index 7ffc3da96..000000000 --- a/_sources/reference/reference/moviepy.audio.AudioClip.concatenate_audioclips.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.audio.AudioClip.concatenate\_audioclips -=============================================== - -.. currentmodule:: moviepy.audio.AudioClip - -.. autofunction:: concatenate_audioclips \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.AudioClip.rst.txt b/_sources/reference/reference/moviepy.audio.AudioClip.rst.txt deleted file mode 100644 index 83ef46c30..000000000 --- a/_sources/reference/reference/moviepy.audio.AudioClip.rst.txt +++ /dev/null @@ -1,44 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.AudioClip -======================= - - -.. automodule:: moviepy.audio.AudioClip - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - AudioArrayClip - AudioClip - CompositeAudioClip - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - concatenate_audioclips - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.fx.AudioDelay.rst.txt b/_sources/reference/reference/moviepy.audio.fx.AudioDelay.rst.txt deleted file mode 100644 index ac3e54835..000000000 --- a/_sources/reference/reference/moviepy.audio.fx.AudioDelay.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.fx.AudioDelay -=========================== - - -.. automodule:: moviepy.audio.fx.AudioDelay - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.fx.AudioFadeIn.rst.txt b/_sources/reference/reference/moviepy.audio.fx.AudioFadeIn.rst.txt deleted file mode 100644 index 1bb24dc47..000000000 --- a/_sources/reference/reference/moviepy.audio.fx.AudioFadeIn.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.fx.AudioFadeIn -============================ - - -.. automodule:: moviepy.audio.fx.AudioFadeIn - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.fx.AudioFadeOut.rst.txt b/_sources/reference/reference/moviepy.audio.fx.AudioFadeOut.rst.txt deleted file mode 100644 index 3b72a023e..000000000 --- a/_sources/reference/reference/moviepy.audio.fx.AudioFadeOut.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.fx.AudioFadeOut -============================= - - -.. automodule:: moviepy.audio.fx.AudioFadeOut - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.fx.AudioLoop.rst.txt b/_sources/reference/reference/moviepy.audio.fx.AudioLoop.rst.txt deleted file mode 100644 index 769a8091d..000000000 --- a/_sources/reference/reference/moviepy.audio.fx.AudioLoop.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. 
custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.fx.AudioLoop -========================== - - -.. automodule:: moviepy.audio.fx.AudioLoop - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.fx.AudioNormalize.rst.txt b/_sources/reference/reference/moviepy.audio.fx.AudioNormalize.rst.txt deleted file mode 100644 index dfa6fec20..000000000 --- a/_sources/reference/reference/moviepy.audio.fx.AudioNormalize.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.fx.AudioNormalize -=============================== - - -.. automodule:: moviepy.audio.fx.AudioNormalize - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.fx.MultiplyStereoVolume.rst.txt b/_sources/reference/reference/moviepy.audio.fx.MultiplyStereoVolume.rst.txt deleted file mode 100644 index b28722070..000000000 --- a/_sources/reference/reference/moviepy.audio.fx.MultiplyStereoVolume.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.fx.MultiplyStereoVolume -===================================== - - -.. automodule:: moviepy.audio.fx.MultiplyStereoVolume - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.fx.MultiplyVolume.rst.txt b/_sources/reference/reference/moviepy.audio.fx.MultiplyVolume.rst.txt deleted file mode 100644 index 8e12304b9..000000000 --- a/_sources/reference/reference/moviepy.audio.fx.MultiplyVolume.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.fx.MultiplyVolume -=============================== - - -.. automodule:: moviepy.audio.fx.MultiplyVolume - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.fx.rst.txt b/_sources/reference/reference/moviepy.audio.fx.rst.txt deleted file mode 100644 index f0241ce92..000000000 --- a/_sources/reference/reference/moviepy.audio.fx.rst.txt +++ /dev/null @@ -1,56 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.fx -================ - - -.. automodule:: moviepy.audio.fx - - - - - - - - - - - - - - - - - - - -.. rubric:: Modules - -.. autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.audio.fx.AudioDelay - - - moviepy.audio.fx.AudioFadeIn - - - moviepy.audio.fx.AudioFadeOut - - - moviepy.audio.fx.AudioLoop - - - moviepy.audio.fx.AudioNormalize - - - moviepy.audio.fx.MultiplyStereoVolume - - - moviepy.audio.fx.MultiplyVolume - - diff --git a/_sources/reference/reference/moviepy.audio.io.AudioFileClip.AudioFileClip.rst.txt b/_sources/reference/reference/moviepy.audio.io.AudioFileClip.AudioFileClip.rst.txt deleted file mode 100644 index 53453a810..000000000 --- a/_sources/reference/reference/moviepy.audio.io.AudioFileClip.AudioFileClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.io.AudioFileClip.AudioFileClip -============================================ - -.. 
currentmodule:: moviepy.audio.io.AudioFileClip - -.. autoclass:: AudioFileClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.io.AudioFileClip.rst.txt b/_sources/reference/reference/moviepy.audio.io.AudioFileClip.rst.txt deleted file mode 100644 index 9c46ed6a4..000000000 --- a/_sources/reference/reference/moviepy.audio.io.AudioFileClip.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.io.AudioFileClip -============================== - - -.. automodule:: moviepy.audio.io.AudioFileClip - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - AudioFileClip - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter.rst.txt b/_sources/reference/reference/moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter.rst.txt deleted file mode 100644 index 69b677229..000000000 --- a/_sources/reference/reference/moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.io.ffmpeg\_audiowriter.FFMPEG\_AudioWriter -======================================================== - -.. currentmodule:: moviepy.audio.io.ffmpeg_audiowriter - -.. autoclass:: FFMPEG_AudioWriter - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite.rst.txt b/_sources/reference/reference/moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite.rst.txt deleted file mode 100644 index 74b8f0bda..000000000 --- a/_sources/reference/reference/moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.audio.io.ffmpeg\_audiowriter.ffmpeg\_audiowrite -======================================================= - -.. currentmodule:: moviepy.audio.io.ffmpeg_audiowriter - -.. autofunction:: ffmpeg_audiowrite \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.io.ffmpeg_audiowriter.rst.txt b/_sources/reference/reference/moviepy.audio.io.ffmpeg_audiowriter.rst.txt deleted file mode 100644 index e73e67fa9..000000000 --- a/_sources/reference/reference/moviepy.audio.io.ffmpeg_audiowriter.rst.txt +++ /dev/null @@ -1,42 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.io.ffmpeg\_audiowriter -==================================== - - -.. automodule:: moviepy.audio.io.ffmpeg_audiowriter - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - FFMPEG_AudioWriter - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - ffmpeg_audiowrite - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer.rst.txt b/_sources/reference/reference/moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer.rst.txt deleted file mode 100644 index 1f6e0b4da..000000000 --- a/_sources/reference/reference/moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. 
custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.io.ffplay\_audiopreviewer.FFPLAY\_AudioPreviewer -============================================================== - -.. currentmodule:: moviepy.audio.io.ffplay_audiopreviewer - -.. autoclass:: FFPLAY_AudioPreviewer - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview.rst.txt b/_sources/reference/reference/moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview.rst.txt deleted file mode 100644 index 42bad1064..000000000 --- a/_sources/reference/reference/moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.audio.io.ffplay\_audiopreviewer.ffplay\_audiopreview -============================================================ - -.. currentmodule:: moviepy.audio.io.ffplay_audiopreviewer - -.. autofunction:: ffplay_audiopreview \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.io.ffplay_audiopreviewer.rst.txt b/_sources/reference/reference/moviepy.audio.io.ffplay_audiopreviewer.rst.txt deleted file mode 100644 index f6f3d2dee..000000000 --- a/_sources/reference/reference/moviepy.audio.io.ffplay_audiopreviewer.rst.txt +++ /dev/null @@ -1,42 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.io.ffplay\_audiopreviewer -======================================= - - -.. automodule:: moviepy.audio.io.ffplay_audiopreviewer - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - FFPLAY_AudioPreviewer - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - ffplay_audiopreview - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.io.readers.FFMPEG_AudioReader.rst.txt b/_sources/reference/reference/moviepy.audio.io.readers.FFMPEG_AudioReader.rst.txt deleted file mode 100644 index 30b8d9a8a..000000000 --- a/_sources/reference/reference/moviepy.audio.io.readers.FFMPEG_AudioReader.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.io.readers.FFMPEG\_AudioReader -============================================ - -.. currentmodule:: moviepy.audio.io.readers - -.. autoclass:: FFMPEG_AudioReader - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.io.readers.rst.txt b/_sources/reference/reference/moviepy.audio.io.readers.rst.txt deleted file mode 100644 index 053a9aa5d..000000000 --- a/_sources/reference/reference/moviepy.audio.io.readers.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.io.readers -======================== - - -.. automodule:: moviepy.audio.io.readers - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - FFMPEG_AudioReader - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.io.rst.txt b/_sources/reference/reference/moviepy.audio.io.rst.txt deleted file mode 100644 index eff6ddb2d..000000000 --- a/_sources/reference/reference/moviepy.audio.io.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. 
custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.io -================ - - -.. automodule:: moviepy.audio.io - - - - - - - - - - - - - - - - - - - -.. rubric:: Modules - -.. autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.audio.io.AudioFileClip - - - moviepy.audio.io.ffmpeg_audiowriter - - - moviepy.audio.io.ffplay_audiopreviewer - - - moviepy.audio.io.readers - - diff --git a/_sources/reference/reference/moviepy.audio.rst.txt b/_sources/reference/reference/moviepy.audio.rst.txt deleted file mode 100644 index 6c46706fa..000000000 --- a/_sources/reference/reference/moviepy.audio.rst.txt +++ /dev/null @@ -1,47 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio -============= - - -.. automodule:: moviepy.audio - - - - - - - - - - - - - - - - - - - -.. rubric:: Modules - -.. autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.audio.AudioClip - - - moviepy.audio.fx - - - moviepy.audio.io - - - moviepy.audio.tools - - diff --git a/_sources/reference/reference/moviepy.audio.tools.cuts.find_audio_period.rst.txt b/_sources/reference/reference/moviepy.audio.tools.cuts.find_audio_period.rst.txt deleted file mode 100644 index 6f0d71fb0..000000000 --- a/_sources/reference/reference/moviepy.audio.tools.cuts.find_audio_period.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.audio.tools.cuts.find\_audio\_period -============================================ - -.. currentmodule:: moviepy.audio.tools.cuts - -.. autofunction:: find_audio_period \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.audio.tools.cuts.rst.txt b/_sources/reference/reference/moviepy.audio.tools.cuts.rst.txt deleted file mode 100644 index 6e778b08e..000000000 --- a/_sources/reference/reference/moviepy.audio.tools.cuts.rst.txt +++ /dev/null @@ -1,34 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.tools.cuts -======================== - - -.. automodule:: moviepy.audio.tools.cuts - - - - - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - find_audio_period - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.audio.tools.rst.txt b/_sources/reference/reference/moviepy.audio.tools.rst.txt deleted file mode 100644 index 93695ca9a..000000000 --- a/_sources/reference/reference/moviepy.audio.tools.rst.txt +++ /dev/null @@ -1,38 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.audio.tools -=================== - - -.. automodule:: moviepy.audio.tools - - - - - - - - - - - - - - - - - - - -.. rubric:: Modules - -.. autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.audio.tools.cuts - - diff --git a/_sources/reference/reference/moviepy.config.check.rst.txt b/_sources/reference/reference/moviepy.config.check.rst.txt deleted file mode 100644 index 5fa0287e6..000000000 --- a/_sources/reference/reference/moviepy.config.check.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.config.check -==================== - -.. currentmodule:: moviepy.config - -.. 
autofunction:: check \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.config.rst.txt b/_sources/reference/reference/moviepy.config.rst.txt deleted file mode 100644 index 2b5cf333c..000000000 --- a/_sources/reference/reference/moviepy.config.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.config -============== - - -.. automodule:: moviepy.config - - - - - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - check - try_cmd - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.config.try_cmd.rst.txt b/_sources/reference/reference/moviepy.config.try_cmd.rst.txt deleted file mode 100644 index d625d7ab7..000000000 --- a/_sources/reference/reference/moviepy.config.try_cmd.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.config.try\_cmd -======================= - -.. currentmodule:: moviepy.config - -.. autofunction:: try_cmd \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.add_mask_if_none.rst.txt b/_sources/reference/reference/moviepy.decorators.add_mask_if_none.rst.txt deleted file mode 100644 index 93d50ef83..000000000 --- a/_sources/reference/reference/moviepy.decorators.add_mask_if_none.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.add\_mask\_if\_none -====================================== - -.. currentmodule:: moviepy.decorators - -.. autofunction:: add_mask_if_none \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.apply_to_audio.rst.txt b/_sources/reference/reference/moviepy.decorators.apply_to_audio.rst.txt deleted file mode 100644 index 45a1431a4..000000000 --- a/_sources/reference/reference/moviepy.decorators.apply_to_audio.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.apply\_to\_audio -=================================== - -.. currentmodule:: moviepy.decorators - -.. autofunction:: apply_to_audio \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.apply_to_mask.rst.txt b/_sources/reference/reference/moviepy.decorators.apply_to_mask.rst.txt deleted file mode 100644 index fa6c41eb0..000000000 --- a/_sources/reference/reference/moviepy.decorators.apply_to_mask.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.apply\_to\_mask -================================== - -.. currentmodule:: moviepy.decorators - -.. autofunction:: apply_to_mask \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.audio_video_effect.rst.txt b/_sources/reference/reference/moviepy.decorators.audio_video_effect.rst.txt deleted file mode 100644 index d1de6a9f0..000000000 --- a/_sources/reference/reference/moviepy.decorators.audio_video_effect.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.audio\_video\_effect -======================================= - -.. currentmodule:: moviepy.decorators - -.. autofunction:: audio_video_effect \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.convert_masks_to_RGB.rst.txt b/_sources/reference/reference/moviepy.decorators.convert_masks_to_RGB.rst.txt deleted file mode 100644 index 2edc66d32..000000000 --- a/_sources/reference/reference/moviepy.decorators.convert_masks_to_RGB.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.convert\_masks\_to\_RGB -========================================== - -.. currentmodule:: moviepy.decorators - -.. 
autofunction:: convert_masks_to_RGB \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.convert_parameter_to_seconds.rst.txt b/_sources/reference/reference/moviepy.decorators.convert_parameter_to_seconds.rst.txt deleted file mode 100644 index 232d8eb92..000000000 --- a/_sources/reference/reference/moviepy.decorators.convert_parameter_to_seconds.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.convert\_parameter\_to\_seconds -================================================== - -.. currentmodule:: moviepy.decorators - -.. autofunction:: convert_parameter_to_seconds \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.convert_path_to_string.rst.txt b/_sources/reference/reference/moviepy.decorators.convert_path_to_string.rst.txt deleted file mode 100644 index cebda55a7..000000000 --- a/_sources/reference/reference/moviepy.decorators.convert_path_to_string.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.convert\_path\_to\_string -============================================ - -.. currentmodule:: moviepy.decorators - -.. autofunction:: convert_path_to_string \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.outplace.rst.txt b/_sources/reference/reference/moviepy.decorators.outplace.rst.txt deleted file mode 100644 index ea8e373b0..000000000 --- a/_sources/reference/reference/moviepy.decorators.outplace.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.outplace -=========================== - -.. currentmodule:: moviepy.decorators - -.. autofunction:: outplace \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.preprocess_args.rst.txt b/_sources/reference/reference/moviepy.decorators.preprocess_args.rst.txt deleted file mode 100644 index 8f07f4774..000000000 --- a/_sources/reference/reference/moviepy.decorators.preprocess_args.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.preprocess\_args -=================================== - -.. currentmodule:: moviepy.decorators - -.. autofunction:: preprocess_args \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.requires_duration.rst.txt b/_sources/reference/reference/moviepy.decorators.requires_duration.rst.txt deleted file mode 100644 index 86f672892..000000000 --- a/_sources/reference/reference/moviepy.decorators.requires_duration.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.requires\_duration -===================================== - -.. currentmodule:: moviepy.decorators - -.. autofunction:: requires_duration \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.requires_fps.rst.txt b/_sources/reference/reference/moviepy.decorators.requires_fps.rst.txt deleted file mode 100644 index 35d088f8e..000000000 --- a/_sources/reference/reference/moviepy.decorators.requires_fps.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.requires\_fps -================================ - -.. currentmodule:: moviepy.decorators - -.. autofunction:: requires_fps \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.decorators.rst.txt b/_sources/reference/reference/moviepy.decorators.rst.txt deleted file mode 100644 index fcabd9247..000000000 --- a/_sources/reference/reference/moviepy.decorators.rst.txt +++ /dev/null @@ -1,36 +0,0 @@ -.. 
custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.decorators -================== - - -.. automodule:: moviepy.decorators - - - - - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - convert_parameter_to_seconds - convert_path_to_string - preprocess_args - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.decorators.use_clip_fps_by_default.rst.txt b/_sources/reference/reference/moviepy.decorators.use_clip_fps_by_default.rst.txt deleted file mode 100644 index c1b1caa5f..000000000 --- a/_sources/reference/reference/moviepy.decorators.use_clip_fps_by_default.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.decorators.use\_clip\_fps\_by\_default -============================================== - -.. currentmodule:: moviepy.decorators - -.. autofunction:: use_clip_fps_by_default \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.rst.txt b/_sources/reference/reference/moviepy.rst.txt deleted file mode 100644 index c688e8c50..000000000 --- a/_sources/reference/reference/moviepy.rst.txt +++ /dev/null @@ -1,57 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy -======= - - -.. automodule:: moviepy - - - - - - - - - - - - - - - - - - - -.. rubric:: Modules - -.. autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.Clip - - - moviepy.Effect - - - moviepy.audio - - - moviepy.config - - - moviepy.decorators - - - moviepy.tools - - - - moviepy.video - - diff --git a/_sources/reference/reference/moviepy.tools.close_all_clips.rst.txt b/_sources/reference/reference/moviepy.tools.close_all_clips.rst.txt deleted file mode 100644 index 3a555e366..000000000 --- a/_sources/reference/reference/moviepy.tools.close_all_clips.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.tools.close\_all\_clips -=============================== - -.. currentmodule:: moviepy.tools - -.. autofunction:: close_all_clips \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.tools.convert_to_seconds.rst.txt b/_sources/reference/reference/moviepy.tools.convert_to_seconds.rst.txt deleted file mode 100644 index 13494f138..000000000 --- a/_sources/reference/reference/moviepy.tools.convert_to_seconds.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.tools.convert\_to\_seconds -================================== - -.. currentmodule:: moviepy.tools - -.. autofunction:: convert_to_seconds \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.tools.cross_platform_popen_params.rst.txt b/_sources/reference/reference/moviepy.tools.cross_platform_popen_params.rst.txt deleted file mode 100644 index a480a5762..000000000 --- a/_sources/reference/reference/moviepy.tools.cross_platform_popen_params.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.tools.cross\_platform\_popen\_params -============================================ - -.. currentmodule:: moviepy.tools - -.. autofunction:: cross_platform_popen_params \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.tools.deprecated_version_of.rst.txt b/_sources/reference/reference/moviepy.tools.deprecated_version_of.rst.txt deleted file mode 100644 index 61fc2f026..000000000 --- a/_sources/reference/reference/moviepy.tools.deprecated_version_of.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.tools.deprecated\_version\_of -===================================== - -.. 
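Among the ``moviepy.tools`` stubs deleted in this stretch is ``convert_to_seconds``, the utility behind the decorator shown earlier. Its accepted input formats, as documented:

.. code-block:: python

    # Usage sketch of ``moviepy.tools.convert_to_seconds``; the expected
    # outputs follow the function's documented examples.
    from moviepy.tools import convert_to_seconds

    print(convert_to_seconds(15.4))          # 15.4
    print(convert_to_seconds((1, 21.5)))     # 81.5   (min, sec)
    print(convert_to_seconds((1, 1, 2)))     # 3662   (hr, min, sec)
    print(convert_to_seconds("01:01:33.5"))  # 3693.5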
currentmodule:: moviepy.tools - -.. autofunction:: deprecated_version_of \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.tools.find_extension.rst.txt b/_sources/reference/reference/moviepy.tools.find_extension.rst.txt deleted file mode 100644 index 29c02d695..000000000 --- a/_sources/reference/reference/moviepy.tools.find_extension.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.tools.find\_extension -============================= - -.. currentmodule:: moviepy.tools - -.. autofunction:: find_extension \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.tools.no_display_available.rst.txt b/_sources/reference/reference/moviepy.tools.no_display_available.rst.txt deleted file mode 100644 index e4a29fa00..000000000 --- a/_sources/reference/reference/moviepy.tools.no_display_available.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.tools.no\_display\_available -==================================== - -.. currentmodule:: moviepy.tools - -.. autofunction:: no_display_available \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.tools.rst.txt b/_sources/reference/reference/moviepy.tools.rst.txt deleted file mode 100644 index 84dc299fd..000000000 --- a/_sources/reference/reference/moviepy.tools.rst.txt +++ /dev/null @@ -1,40 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.tools -============= - - -.. automodule:: moviepy.tools - - - - - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - close_all_clips - convert_to_seconds - cross_platform_popen_params - deprecated_version_of - find_extension - no_display_available - subprocess_call - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.tools.subprocess_call.rst.txt b/_sources/reference/reference/moviepy.tools.subprocess_call.rst.txt deleted file mode 100644 index ab7f5a1e3..000000000 --- a/_sources/reference/reference/moviepy.tools.subprocess_call.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.tools.subprocess\_call -============================== - -.. currentmodule:: moviepy.tools - -.. autofunction:: subprocess_call \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.VideoClip.BitmapClip.rst.txt b/_sources/reference/reference/moviepy.video.VideoClip.BitmapClip.rst.txt deleted file mode 100644 index d77f3d78b..000000000 --- a/_sources/reference/reference/moviepy.video.VideoClip.BitmapClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.VideoClip.BitmapClip -================================== - -.. currentmodule:: moviepy.video.VideoClip - -.. autoclass:: BitmapClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.VideoClip.ColorClip.rst.txt b/_sources/reference/reference/moviepy.video.VideoClip.ColorClip.rst.txt deleted file mode 100644 index 15c771424..000000000 --- a/_sources/reference/reference/moviepy.video.VideoClip.ColorClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.VideoClip.ColorClip -================================= - -.. currentmodule:: moviepy.video.VideoClip - -.. 
autoclass:: ColorClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.VideoClip.DataVideoClip.rst.txt b/_sources/reference/reference/moviepy.video.VideoClip.DataVideoClip.rst.txt deleted file mode 100644 index f235c1abe..000000000 --- a/_sources/reference/reference/moviepy.video.VideoClip.DataVideoClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.VideoClip.DataVideoClip -===================================== - -.. currentmodule:: moviepy.video.VideoClip - -.. autoclass:: DataVideoClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.VideoClip.ImageClip.rst.txt b/_sources/reference/reference/moviepy.video.VideoClip.ImageClip.rst.txt deleted file mode 100644 index fed9cb6d8..000000000 --- a/_sources/reference/reference/moviepy.video.VideoClip.ImageClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.VideoClip.ImageClip -================================= - -.. currentmodule:: moviepy.video.VideoClip - -.. autoclass:: ImageClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.VideoClip.TextClip.rst.txt b/_sources/reference/reference/moviepy.video.VideoClip.TextClip.rst.txt deleted file mode 100644 index b6ac2f00f..000000000 --- a/_sources/reference/reference/moviepy.video.VideoClip.TextClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.VideoClip.TextClip -================================ - -.. currentmodule:: moviepy.video.VideoClip - -.. autoclass:: TextClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.VideoClip.UpdatedVideoClip.rst.txt b/_sources/reference/reference/moviepy.video.VideoClip.UpdatedVideoClip.rst.txt deleted file mode 100644 index 89a72880d..000000000 --- a/_sources/reference/reference/moviepy.video.VideoClip.UpdatedVideoClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.VideoClip.UpdatedVideoClip -======================================== - -.. currentmodule:: moviepy.video.VideoClip - -.. autoclass:: UpdatedVideoClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.VideoClip.VideoClip.rst.txt b/_sources/reference/reference/moviepy.video.VideoClip.VideoClip.rst.txt deleted file mode 100644 index 36f1fa4da..000000000 --- a/_sources/reference/reference/moviepy.video.VideoClip.VideoClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.VideoClip.VideoClip -================================= - -.. currentmodule:: moviepy.video.VideoClip - -.. autoclass:: VideoClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.VideoClip.rst.txt b/_sources/reference/reference/moviepy.video.VideoClip.rst.txt deleted file mode 100644 index bc9f32671..000000000 --- a/_sources/reference/reference/moviepy.video.VideoClip.rst.txt +++ /dev/null @@ -1,41 +0,0 @@ -.. 
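The ``moviepy.video.VideoClip`` class stubs removed here (``ColorClip``, ``ImageClip``, ``TextClip``, ``VideoClip``, …) are the basic building blocks for composing videos. A sketch of constructing the three simplest ones — keyword names follow the v2-style API (``font_size``, ``with_duration``) and ``logo.png``/``Arial.ttf`` are hypothetical files:

.. code-block:: python

    # Hedged sketch of the clip classes listed in the deleted stubs.
    from moviepy import ColorClip, ImageClip, TextClip

    background = ColorClip(size=(640, 360), color=(0, 0, 0), duration=5)
    logo = ImageClip("logo.png").with_duration(5)       # hypothetical file
    title = TextClip(
        font="Arial.ttf",                               # hypothetical font path
        text="Hello", font_size=48, color="white",
    ).with_duration(5)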
custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.VideoClip -======================= - - -.. automodule:: moviepy.video.VideoClip - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - BitmapClip - ColorClip - DataVideoClip - ImageClip - TextClip - UpdatedVideoClip - VideoClip - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip.rst.txt b/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip.rst.txt deleted file mode 100644 index a6d97c996..000000000 --- a/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip -=============================================================== - -.. currentmodule:: moviepy.video.compositing.CompositeVideoClip - -.. autoclass:: CompositeVideoClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.clips_array.rst.txt b/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.clips_array.rst.txt deleted file mode 100644 index d2fa7baac..000000000 --- a/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.clips_array.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.compositing.CompositeVideoClip.clips\_array -========================================================= - -.. currentmodule:: moviepy.video.compositing.CompositeVideoClip - -.. autofunction:: clips_array \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips.rst.txt b/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips.rst.txt deleted file mode 100644 index b6a0cae41..000000000 --- a/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.compositing.CompositeVideoClip.concatenate\_videoclips -==================================================================== - -.. currentmodule:: moviepy.video.compositing.CompositeVideoClip - -.. autofunction:: concatenate_videoclips \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.rst.txt b/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.rst.txt deleted file mode 100644 index c0a54b4a4..000000000 --- a/_sources/reference/reference/moviepy.video.compositing.CompositeVideoClip.rst.txt +++ /dev/null @@ -1,43 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.compositing.CompositeVideoClip -============================================ - - -.. automodule:: moviepy.video.compositing.CompositeVideoClip - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - CompositeVideoClip - - - - - - - .. rubric:: Functions - - .. 
autosummary:: - :toctree: - - clips_array - concatenate_videoclips - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.compositing.rst.txt b/_sources/reference/reference/moviepy.video.compositing.rst.txt deleted file mode 100644 index 973665c79..000000000 --- a/_sources/reference/reference/moviepy.video.compositing.rst.txt +++ /dev/null @@ -1,38 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.compositing -========================= - - -.. automodule:: moviepy.video.compositing - - - - - - - - - - - - - - - - - - - -.. rubric:: Modules - -.. autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.video.compositing.CompositeVideoClip - - diff --git a/_sources/reference/reference/moviepy.video.fx.AccelDecel.rst.txt b/_sources/reference/reference/moviepy.video.fx.AccelDecel.rst.txt deleted file mode 100644 index f4468639d..000000000 --- a/_sources/reference/reference/moviepy.video.fx.AccelDecel.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.AccelDecel -=========================== - - -.. automodule:: moviepy.video.fx.AccelDecel - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.BlackAndWhite.rst.txt b/_sources/reference/reference/moviepy.video.fx.BlackAndWhite.rst.txt deleted file mode 100644 index 190086075..000000000 --- a/_sources/reference/reference/moviepy.video.fx.BlackAndWhite.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.BlackAndWhite -============================== - - -.. automodule:: moviepy.video.fx.BlackAndWhite - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.Blink.rst.txt b/_sources/reference/reference/moviepy.video.fx.Blink.rst.txt deleted file mode 100644 index bbadb6a05..000000000 --- a/_sources/reference/reference/moviepy.video.fx.Blink.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.Blink -====================== - - -.. automodule:: moviepy.video.fx.Blink - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.Crop.rst.txt b/_sources/reference/reference/moviepy.video.fx.Crop.rst.txt deleted file mode 100644 index 99e3a209d..000000000 --- a/_sources/reference/reference/moviepy.video.fx.Crop.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.Crop -===================== - - -.. automodule:: moviepy.video.fx.Crop - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.CrossFadeIn.rst.txt b/_sources/reference/reference/moviepy.video.fx.CrossFadeIn.rst.txt deleted file mode 100644 index f64b8f7d4..000000000 --- a/_sources/reference/reference/moviepy.video.fx.CrossFadeIn.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.CrossFadeIn -============================ - - -.. 
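These compositing stubs cover the three ways MoviePy combines clips: ``concatenate_videoclips`` (end to end), ``CompositeVideoClip`` (layered), and ``clips_array`` (a grid). A short sketch, with ``clip_a.mp4``/``clip_b.mp4`` as hypothetical inputs:

.. code-block:: python

    # Hedged sketch of the three compositing entry points listed above.
    from moviepy import (
        VideoFileClip,
        CompositeVideoClip,
        clips_array,
        concatenate_videoclips,
    )

    a = VideoFileClip("clip_a.mp4")
    b = VideoFileClip("clip_b.mp4")

    sequence = concatenate_videoclips([a, b])                 # a, then b
    overlay = CompositeVideoClip(
        [a, b.with_position(("right", "bottom"))]             # b on top of a
    )
    grid = clips_array([[a, b], [b, a]])                      # 2x2 layout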
automodule:: moviepy.video.fx.CrossFadeIn - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.CrossFadeOut.rst.txt b/_sources/reference/reference/moviepy.video.fx.CrossFadeOut.rst.txt deleted file mode 100644 index e77a197ea..000000000 --- a/_sources/reference/reference/moviepy.video.fx.CrossFadeOut.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.CrossFadeOut -============================= - - -.. automodule:: moviepy.video.fx.CrossFadeOut - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.EvenSize.rst.txt b/_sources/reference/reference/moviepy.video.fx.EvenSize.rst.txt deleted file mode 100644 index fc7d34e7d..000000000 --- a/_sources/reference/reference/moviepy.video.fx.EvenSize.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.EvenSize -========================= - - -.. automodule:: moviepy.video.fx.EvenSize - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.FadeIn.rst.txt b/_sources/reference/reference/moviepy.video.fx.FadeIn.rst.txt deleted file mode 100644 index c47b10170..000000000 --- a/_sources/reference/reference/moviepy.video.fx.FadeIn.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.FadeIn -======================= - - -.. automodule:: moviepy.video.fx.FadeIn - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.FadeOut.rst.txt b/_sources/reference/reference/moviepy.video.fx.FadeOut.rst.txt deleted file mode 100644 index 5dffda0bd..000000000 --- a/_sources/reference/reference/moviepy.video.fx.FadeOut.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.FadeOut -======================== - - -.. automodule:: moviepy.video.fx.FadeOut - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.Freeze.rst.txt b/_sources/reference/reference/moviepy.video.fx.Freeze.rst.txt deleted file mode 100644 index 985de8a9a..000000000 --- a/_sources/reference/reference/moviepy.video.fx.Freeze.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.Freeze -======================= - - -.. automodule:: moviepy.video.fx.Freeze - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.FreezeRegion.rst.txt b/_sources/reference/reference/moviepy.video.fx.FreezeRegion.rst.txt deleted file mode 100644 index 97463c0d1..000000000 --- a/_sources/reference/reference/moviepy.video.fx.FreezeRegion.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.FreezeRegion -============================= - - -.. 
automodule:: moviepy.video.fx.FreezeRegion - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.GammaCorrection.rst.txt b/_sources/reference/reference/moviepy.video.fx.GammaCorrection.rst.txt deleted file mode 100644 index 76e15d1e7..000000000 --- a/_sources/reference/reference/moviepy.video.fx.GammaCorrection.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.GammaCorrection -================================ - - -.. automodule:: moviepy.video.fx.GammaCorrection - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.HeadBlur.rst.txt b/_sources/reference/reference/moviepy.video.fx.HeadBlur.rst.txt deleted file mode 100644 index 71f85fb38..000000000 --- a/_sources/reference/reference/moviepy.video.fx.HeadBlur.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.HeadBlur -========================= - - -.. automodule:: moviepy.video.fx.HeadBlur - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.InvertColors.rst.txt b/_sources/reference/reference/moviepy.video.fx.InvertColors.rst.txt deleted file mode 100644 index 79d4648cd..000000000 --- a/_sources/reference/reference/moviepy.video.fx.InvertColors.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.InvertColors -============================= - - -.. automodule:: moviepy.video.fx.InvertColors - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.Loop.rst.txt b/_sources/reference/reference/moviepy.video.fx.Loop.rst.txt deleted file mode 100644 index 247a9d4e9..000000000 --- a/_sources/reference/reference/moviepy.video.fx.Loop.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.Loop -===================== - - -.. automodule:: moviepy.video.fx.Loop - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.LumContrast.rst.txt b/_sources/reference/reference/moviepy.video.fx.LumContrast.rst.txt deleted file mode 100644 index fdd2a5044..000000000 --- a/_sources/reference/reference/moviepy.video.fx.LumContrast.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.LumContrast -============================ - - -.. automodule:: moviepy.video.fx.LumContrast - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.MakeLoopable.rst.txt b/_sources/reference/reference/moviepy.video.fx.MakeLoopable.rst.txt deleted file mode 100644 index b02e53c26..000000000 --- a/_sources/reference/reference/moviepy.video.fx.MakeLoopable.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.MakeLoopable -============================= - - -.. 
automodule:: moviepy.video.fx.MakeLoopable - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.Margin.rst.txt b/_sources/reference/reference/moviepy.video.fx.Margin.rst.txt deleted file mode 100644 index 5eca80f9c..000000000 --- a/_sources/reference/reference/moviepy.video.fx.Margin.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.Margin -======================= - - -.. automodule:: moviepy.video.fx.Margin - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.MaskColor.rst.txt b/_sources/reference/reference/moviepy.video.fx.MaskColor.rst.txt deleted file mode 100644 index d8b5ff3b2..000000000 --- a/_sources/reference/reference/moviepy.video.fx.MaskColor.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.MaskColor -========================== - - -.. automodule:: moviepy.video.fx.MaskColor - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.MasksAnd.rst.txt b/_sources/reference/reference/moviepy.video.fx.MasksAnd.rst.txt deleted file mode 100644 index 1d6fbd39b..000000000 --- a/_sources/reference/reference/moviepy.video.fx.MasksAnd.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.MasksAnd -========================= - - -.. automodule:: moviepy.video.fx.MasksAnd - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.MasksOr.rst.txt b/_sources/reference/reference/moviepy.video.fx.MasksOr.rst.txt deleted file mode 100644 index c83535742..000000000 --- a/_sources/reference/reference/moviepy.video.fx.MasksOr.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.MasksOr -======================== - - -.. automodule:: moviepy.video.fx.MasksOr - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.MirrorX.rst.txt b/_sources/reference/reference/moviepy.video.fx.MirrorX.rst.txt deleted file mode 100644 index 604f6d937..000000000 --- a/_sources/reference/reference/moviepy.video.fx.MirrorX.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.MirrorX -======================== - - -.. automodule:: moviepy.video.fx.MirrorX - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.MirrorY.rst.txt b/_sources/reference/reference/moviepy.video.fx.MirrorY.rst.txt deleted file mode 100644 index 25de1dce1..000000000 --- a/_sources/reference/reference/moviepy.video.fx.MirrorY.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.MirrorY -======================== - - -.. 
automodule:: moviepy.video.fx.MirrorY - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.MultiplyColor.rst.txt b/_sources/reference/reference/moviepy.video.fx.MultiplyColor.rst.txt deleted file mode 100644 index 190a7585f..000000000 --- a/_sources/reference/reference/moviepy.video.fx.MultiplyColor.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.MultiplyColor -============================== - - -.. automodule:: moviepy.video.fx.MultiplyColor - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.MultiplySpeed.rst.txt b/_sources/reference/reference/moviepy.video.fx.MultiplySpeed.rst.txt deleted file mode 100644 index 792374378..000000000 --- a/_sources/reference/reference/moviepy.video.fx.MultiplySpeed.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.MultiplySpeed -============================== - - -.. automodule:: moviepy.video.fx.MultiplySpeed - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.Painting.rst.txt b/_sources/reference/reference/moviepy.video.fx.Painting.rst.txt deleted file mode 100644 index 46dde60bd..000000000 --- a/_sources/reference/reference/moviepy.video.fx.Painting.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.Painting -========================= - - -.. automodule:: moviepy.video.fx.Painting - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.Resize.rst.txt b/_sources/reference/reference/moviepy.video.fx.Resize.rst.txt deleted file mode 100644 index d8b60ef9b..000000000 --- a/_sources/reference/reference/moviepy.video.fx.Resize.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.Resize -======================= - - -.. automodule:: moviepy.video.fx.Resize - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.Rotate.rst.txt b/_sources/reference/reference/moviepy.video.fx.Rotate.rst.txt deleted file mode 100644 index 8fecb8632..000000000 --- a/_sources/reference/reference/moviepy.video.fx.Rotate.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.Rotate -======================= - - -.. automodule:: moviepy.video.fx.Rotate - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.Scroll.rst.txt b/_sources/reference/reference/moviepy.video.fx.Scroll.rst.txt deleted file mode 100644 index 5f581076d..000000000 --- a/_sources/reference/reference/moviepy.video.fx.Scroll.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.Scroll -======================= - - -.. 
automodule:: moviepy.video.fx.Scroll - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.SlideIn.rst.txt b/_sources/reference/reference/moviepy.video.fx.SlideIn.rst.txt deleted file mode 100644 index e4c2ebfcb..000000000 --- a/_sources/reference/reference/moviepy.video.fx.SlideIn.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.SlideIn -======================== - - -.. automodule:: moviepy.video.fx.SlideIn - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.SlideOut.rst.txt b/_sources/reference/reference/moviepy.video.fx.SlideOut.rst.txt deleted file mode 100644 index 760bf53ba..000000000 --- a/_sources/reference/reference/moviepy.video.fx.SlideOut.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.SlideOut -========================= - - -.. automodule:: moviepy.video.fx.SlideOut - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.SuperSample.rst.txt b/_sources/reference/reference/moviepy.video.fx.SuperSample.rst.txt deleted file mode 100644 index 5aa30bd37..000000000 --- a/_sources/reference/reference/moviepy.video.fx.SuperSample.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.SuperSample -============================ - - -.. automodule:: moviepy.video.fx.SuperSample - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.TimeMirror.rst.txt b/_sources/reference/reference/moviepy.video.fx.TimeMirror.rst.txt deleted file mode 100644 index f8d723836..000000000 --- a/_sources/reference/reference/moviepy.video.fx.TimeMirror.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.TimeMirror -=========================== - - -.. automodule:: moviepy.video.fx.TimeMirror - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.TimeSymmetrize.rst.txt b/_sources/reference/reference/moviepy.video.fx.TimeSymmetrize.rst.txt deleted file mode 100644 index 016701cb7..000000000 --- a/_sources/reference/reference/moviepy.video.fx.TimeSymmetrize.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx.TimeSymmetrize -=============================== - - -.. automodule:: moviepy.video.fx.TimeSymmetrize - :inherited-members: - - - - - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.fx.rst.txt b/_sources/reference/reference/moviepy.video.fx.rst.txt deleted file mode 100644 index 160979573..000000000 --- a/_sources/reference/reference/moviepy.video.fx.rst.txt +++ /dev/null @@ -1,137 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.fx -================ - - -.. automodule:: moviepy.video.fx - - - - - - - - - - - - - - - - - - - -.. rubric:: Modules - -.. 
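The long run of ``moviepy.video.fx`` stubs deleted through this stretch documents the class-based effects API: each effect (``FadeIn``, ``Resize``, ``MultiplySpeed``, …) is instantiated and passed to ``with_effects``. A hedged sketch of that pattern, with ``input.mp4`` as a hypothetical file:

.. code-block:: python

    # Sketch of the v2 effects API implied by the class-based fx stubs.
    from moviepy import VideoFileClip, vfx

    clip = VideoFileClip("input.mp4").with_effects([
        vfx.Resize(0.5),          # half size
        vfx.FadeIn(1.0),          # 1 s fade from black
        vfx.FadeOut(1.0),         # 1 s fade to black
        vfx.MultiplySpeed(2.0),   # play twice as fast
    ])
    clip.write_videofile("output.mp4")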
autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.video.fx.AccelDecel - - - moviepy.video.fx.BlackAndWhite - - - moviepy.video.fx.Blink - - - moviepy.video.fx.Crop - - - moviepy.video.fx.CrossFadeIn - - - moviepy.video.fx.CrossFadeOut - - - moviepy.video.fx.EvenSize - - - moviepy.video.fx.FadeIn - - - moviepy.video.fx.FadeOut - - - moviepy.video.fx.Freeze - - - moviepy.video.fx.FreezeRegion - - - moviepy.video.fx.GammaCorrection - - - moviepy.video.fx.HeadBlur - - - moviepy.video.fx.InvertColors - - - moviepy.video.fx.Loop - - - moviepy.video.fx.LumContrast - - - moviepy.video.fx.MakeLoopable - - - moviepy.video.fx.Margin - - - moviepy.video.fx.MaskColor - - - moviepy.video.fx.MasksAnd - - - moviepy.video.fx.MasksOr - - - moviepy.video.fx.MirrorX - - - moviepy.video.fx.MirrorY - - - moviepy.video.fx.MultiplyColor - - - moviepy.video.fx.MultiplySpeed - - - moviepy.video.fx.Painting - - - moviepy.video.fx.Resize - - - moviepy.video.fx.Rotate - - - moviepy.video.fx.Scroll - - - moviepy.video.fx.SlideIn - - - moviepy.video.fx.SlideOut - - - moviepy.video.fx.SuperSample - - - moviepy.video.fx.TimeMirror - - - moviepy.video.fx.TimeSymmetrize - - diff --git a/_sources/reference/reference/moviepy.video.io.ImageSequenceClip.ImageSequenceClip.rst.txt b/_sources/reference/reference/moviepy.video.io.ImageSequenceClip.ImageSequenceClip.rst.txt deleted file mode 100644 index 7d66366a7..000000000 --- a/_sources/reference/reference/moviepy.video.io.ImageSequenceClip.ImageSequenceClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ImageSequenceClip.ImageSequenceClip -==================================================== - -.. currentmodule:: moviepy.video.io.ImageSequenceClip - -.. autoclass:: ImageSequenceClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ImageSequenceClip.rst.txt b/_sources/reference/reference/moviepy.video.io.ImageSequenceClip.rst.txt deleted file mode 100644 index 382d6234d..000000000 --- a/_sources/reference/reference/moviepy.video.io.ImageSequenceClip.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ImageSequenceClip -================================== - - -.. automodule:: moviepy.video.io.ImageSequenceClip - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - ImageSequenceClip - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.io.VideoFileClip.VideoFileClip.rst.txt b/_sources/reference/reference/moviepy.video.io.VideoFileClip.VideoFileClip.rst.txt deleted file mode 100644 index afe3e2a32..000000000 --- a/_sources/reference/reference/moviepy.video.io.VideoFileClip.VideoFileClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.VideoFileClip.VideoFileClip -============================================ - -.. currentmodule:: moviepy.video.io.VideoFileClip - -.. 
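The ``ImageSequenceClip`` stub deleted here documents the class that turns a series of frames into a video. A minimal sketch — the ``frames/`` directory is hypothetical:

.. code-block:: python

    # Minimal sketch of ``ImageSequenceClip``: numbered frames -> video.
    from moviepy import ImageSequenceClip

    clip = ImageSequenceClip("frames/", fps=24)  # or a list of filenames
    clip.write_videofile("animation.mp4")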
autoclass:: VideoFileClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.VideoFileClip.rst.txt b/_sources/reference/reference/moviepy.video.io.VideoFileClip.rst.txt deleted file mode 100644 index 2fe8ca538..000000000 --- a/_sources/reference/reference/moviepy.video.io.VideoFileClip.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.VideoFileClip -============================== - - -.. automodule:: moviepy.video.io.VideoFileClip - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - VideoFileClip - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.io.display_in_notebook.HTML2.rst.txt b/_sources/reference/reference/moviepy.video.io.display_in_notebook.HTML2.rst.txt deleted file mode 100644 index 723253680..000000000 --- a/_sources/reference/reference/moviepy.video.io.display_in_notebook.HTML2.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.display\_in\_notebook.HTML2 -============================================ - -.. currentmodule:: moviepy.video.io.display_in_notebook - -.. autofunction:: HTML2 \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.display_in_notebook.display_in_notebook.rst.txt b/_sources/reference/reference/moviepy.video.io.display_in_notebook.display_in_notebook.rst.txt deleted file mode 100644 index c85a8c75b..000000000 --- a/_sources/reference/reference/moviepy.video.io.display_in_notebook.display_in_notebook.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.display\_in\_notebook.display\_in\_notebook -============================================================ - -.. currentmodule:: moviepy.video.io.display_in_notebook - -.. autofunction:: display_in_notebook \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.display_in_notebook.html_embed.rst.txt b/_sources/reference/reference/moviepy.video.io.display_in_notebook.html_embed.rst.txt deleted file mode 100644 index 5e99be8e1..000000000 --- a/_sources/reference/reference/moviepy.video.io.display_in_notebook.html_embed.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.display\_in\_notebook.html\_embed -================================================== - -.. currentmodule:: moviepy.video.io.display_in_notebook - -.. autofunction:: html_embed \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.display_in_notebook.rst.txt b/_sources/reference/reference/moviepy.video.io.display_in_notebook.rst.txt deleted file mode 100644 index 7d35a5bb1..000000000 --- a/_sources/reference/reference/moviepy.video.io.display_in_notebook.rst.txt +++ /dev/null @@ -1,36 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.display\_in\_notebook -====================================== - - -.. automodule:: moviepy.video.io.display_in_notebook - - - - - - - - - - - .. rubric:: Functions - - .. 
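``VideoFileClip`` and the ``display_in_notebook`` helpers whose stubs are deleted here cover the most common read-and-preview loop. A hedged sketch — ``subclipped`` is the v2 name for what v1 called ``subclip``, and the preview call assumes a running Jupyter kernel:

.. code-block:: python

    # Hedged sketch: load a file, trim it, preview it in a notebook cell.
    from moviepy import VideoFileClip

    clip = VideoFileClip("input.mp4").subclipped(10, 15)  # hypothetical file
    clip.display_in_notebook()  # embeds the clip in a Jupyter cell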
autosummary:: - :toctree: - - HTML2 - display_in_notebook - html_embed - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.rst.txt deleted file mode 100644 index b49939e53..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ffmpeg\_reader.FFMPEG\_VideoReader -=================================================== - -.. currentmodule:: moviepy.video.io.ffmpeg_reader - -.. autoclass:: FFMPEG_VideoReader - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.rst.txt deleted file mode 100644 index ec691db3a..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ffmpeg\_reader.FFmpegInfosParser -================================================= - -.. currentmodule:: moviepy.video.io.ffmpeg_reader - -.. autoclass:: FFmpegInfosParser - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos.rst.txt deleted file mode 100644 index 8a138a991..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffmpeg\_reader.ffmpeg\_parse\_infos -==================================================== - -.. currentmodule:: moviepy.video.io.ffmpeg_reader - -.. autofunction:: ffmpeg_parse_infos \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_read_image.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_read_image.rst.txt deleted file mode 100644 index b4e7655d2..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_read_image.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffmpeg\_reader.ffmpeg\_read\_image -=================================================== - -.. currentmodule:: moviepy.video.io.ffmpeg_reader - -.. autofunction:: ffmpeg_read_image \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.rst.txt deleted file mode 100644 index 14e22d3c9..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_reader.rst.txt +++ /dev/null @@ -1,44 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ffmpeg\_reader -=============================== - - -.. automodule:: moviepy.video.io.ffmpeg_reader - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - FFMPEG_VideoReader - FFmpegInfosParser - - - - - - - .. rubric:: Functions - - .. 
autosummary:: - :toctree: - - ffmpeg_parse_infos - ffmpeg_read_image - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio.rst.txt deleted file mode 100644 index 4bb121616..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffmpeg\_tools.ffmpeg\_extract\_audio -===================================================== - -.. currentmodule:: moviepy.video.io.ffmpeg_tools - -.. autofunction:: ffmpeg_extract_audio \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip.rst.txt deleted file mode 100644 index 0f033e006..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffmpeg\_tools.ffmpeg\_extract\_subclip -======================================================= - -.. currentmodule:: moviepy.video.io.ffmpeg_tools - -.. autofunction:: ffmpeg_extract_subclip \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio.rst.txt deleted file mode 100644 index 0168d48d7..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffmpeg\_tools.ffmpeg\_merge\_video\_audio -========================================================== - -.. currentmodule:: moviepy.video.io.ffmpeg_tools - -.. autofunction:: ffmpeg_merge_video_audio \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_resize.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_resize.rst.txt deleted file mode 100644 index e09959c10..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_resize.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffmpeg\_tools.ffmpeg\_resize -============================================= - -.. currentmodule:: moviepy.video.io.ffmpeg_tools - -.. autofunction:: ffmpeg_resize \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video.rst.txt deleted file mode 100644 index 22f407bb6..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffmpeg\_tools.ffmpeg\_stabilize\_video -======================================================= - -.. currentmodule:: moviepy.video.io.ffmpeg_tools - -.. autofunction:: ffmpeg_stabilize_video \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.rst.txt deleted file mode 100644 index 5393709a5..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_tools.rst.txt +++ /dev/null @@ -1,38 +0,0 @@ -.. 
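The ``ffmpeg_tools`` stubs removed here document helpers that shell out to FFmpeg directly, with no clip objects involved. A sketch of the two most common ones — positional argument order is an assumption based on the function names:

.. code-block:: python

    # Hedged sketch of the ffmpeg_tools helpers listed in the deleted stub.
    from moviepy.video.io.ffmpeg_tools import (
        ffmpeg_extract_audio,
        ffmpeg_extract_subclip,
    )

    # Cut [30 s, 60 s) out of a (hypothetical) input file.
    ffmpeg_extract_subclip("input.mp4", 30, 60, "cut.mp4")

    # Dump the audio track to a separate file.
    ffmpeg_extract_audio("input.mp4", "audio.mp3")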
custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ffmpeg\_tools -============================== - - -.. automodule:: moviepy.video.io.ffmpeg_tools - - - - - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - ffmpeg_extract_audio - ffmpeg_extract_subclip - ffmpeg_merge_video_audio - ffmpeg_resize - ffmpeg_stabilize_video - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter.rst.txt deleted file mode 100644 index 6caa92f16..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ffmpeg\_writer.FFMPEG\_VideoWriter -=================================================== - -.. currentmodule:: moviepy.video.io.ffmpeg_writer - -.. autoclass:: FFMPEG_VideoWriter - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_image.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_image.rst.txt deleted file mode 100644 index e11775eec..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_image.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffmpeg\_writer.ffmpeg\_write\_image -==================================================== - -.. currentmodule:: moviepy.video.io.ffmpeg_writer - -.. autofunction:: ffmpeg_write_image \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_video.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_video.rst.txt deleted file mode 100644 index ef007c365..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_video.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffmpeg\_writer.ffmpeg\_write\_video -==================================================== - -.. currentmodule:: moviepy.video.io.ffmpeg_writer - -.. autofunction:: ffmpeg_write_video \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.rst.txt b/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.rst.txt deleted file mode 100644 index 183788edc..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffmpeg_writer.rst.txt +++ /dev/null @@ -1,43 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ffmpeg\_writer -=============================== - - -.. automodule:: moviepy.video.io.ffmpeg_writer - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - FFMPEG_VideoWriter - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - ffmpeg_write_image - ffmpeg_write_video - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer.rst.txt b/_sources/reference/reference/moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer.rst.txt deleted file mode 100644 index 2401a070a..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. 
custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ffplay\_previewer.FFPLAY\_VideoPreviewer -========================================================= - -.. currentmodule:: moviepy.video.io.ffplay_previewer - -.. autoclass:: FFPLAY_VideoPreviewer - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffplay_previewer.ffplay_preview_video.rst.txt b/_sources/reference/reference/moviepy.video.io.ffplay_previewer.ffplay_preview_video.rst.txt deleted file mode 100644 index ea8b91376..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffplay_previewer.ffplay_preview_video.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.ffplay\_previewer.ffplay\_preview\_video -========================================================= - -.. currentmodule:: moviepy.video.io.ffplay_previewer - -.. autofunction:: ffplay_preview_video \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.ffplay_previewer.rst.txt b/_sources/reference/reference/moviepy.video.io.ffplay_previewer.rst.txt deleted file mode 100644 index f88a43983..000000000 --- a/_sources/reference/reference/moviepy.video.io.ffplay_previewer.rst.txt +++ /dev/null @@ -1,42 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.ffplay\_previewer -================================== - - -.. automodule:: moviepy.video.io.ffplay_previewer - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - FFPLAY_VideoPreviewer - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - ffplay_preview_video - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.io.gif_writers.rst.txt b/_sources/reference/reference/moviepy.video.io.gif_writers.rst.txt deleted file mode 100644 index 0e2f6578d..000000000 --- a/_sources/reference/reference/moviepy.video.io.gif_writers.rst.txt +++ /dev/null @@ -1,34 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io.gif\_writers -============================= - - -.. automodule:: moviepy.video.io.gif_writers - - - - - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - write_gif_with_imageio - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.io.gif_writers.write_gif_with_imageio.rst.txt b/_sources/reference/reference/moviepy.video.io.gif_writers.write_gif_with_imageio.rst.txt deleted file mode 100644 index db632f4a9..000000000 --- a/_sources/reference/reference/moviepy.video.io.gif_writers.write_gif_with_imageio.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.io.gif\_writers.write\_gif\_with\_imageio -======================================================= - -.. currentmodule:: moviepy.video.io.gif_writers - -.. autofunction:: write_gif_with_imageio \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.io.rst.txt b/_sources/reference/reference/moviepy.video.io.rst.txt deleted file mode 100644 index 523f29998..000000000 --- a/_sources/reference/reference/moviepy.video.io.rst.txt +++ /dev/null @@ -1,59 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.io -================ - - -.. automodule:: moviepy.video.io - - - - - - - - - - - - - - - - - - - -.. 
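The ``gif_writers`` stub deleted here documents ``write_gif_with_imageio``, the imageio backend that (as far as the stub indicates) sits behind the higher-level ``write_gif`` method used below. A sketch, with ``input.mp4`` hypothetical:

.. code-block:: python

    # Sketch of GIF output via the high-level method; the deleted stub
    # documents the imageio backend underneath it.
    from moviepy import VideoFileClip

    clip = VideoFileClip("input.mp4").subclipped(0, 3).resized(0.4)
    clip.write_gif("preview.gif", fps=10)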
rubric:: Modules - -.. autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.video.io.ImageSequenceClip - - - moviepy.video.io.VideoFileClip - - - moviepy.video.io.display_in_notebook - - - moviepy.video.io.ffmpeg_reader - - - moviepy.video.io.ffmpeg_tools - - - moviepy.video.io.ffmpeg_writer - - - moviepy.video.io.ffplay_previewer - - - moviepy.video.io.gif_writers - - diff --git a/_sources/reference/reference/moviepy.video.rst.txt b/_sources/reference/reference/moviepy.video.rst.txt deleted file mode 100644 index 8f175f0c7..000000000 --- a/_sources/reference/reference/moviepy.video.rst.txt +++ /dev/null @@ -1,50 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video -============= - - -.. automodule:: moviepy.video - - - - - - - - - - - - - - - - - - - -.. rubric:: Modules - -.. autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.video.VideoClip - - - moviepy.video.compositing - - - moviepy.video.fx - - - moviepy.video.io - - - moviepy.video.tools - - diff --git a/_sources/reference/reference/moviepy.video.tools.credits.CreditsClip.rst.txt b/_sources/reference/reference/moviepy.video.tools.credits.CreditsClip.rst.txt deleted file mode 100644 index a1b6d6ba3..000000000 --- a/_sources/reference/reference/moviepy.video.tools.credits.CreditsClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.credits.CreditsClip -======================================= - -.. currentmodule:: moviepy.video.tools.credits - -.. autoclass:: CreditsClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.credits.rst.txt b/_sources/reference/reference/moviepy.video.tools.credits.rst.txt deleted file mode 100644 index ea1cfe1cf..000000000 --- a/_sources/reference/reference/moviepy.video.tools.credits.rst.txt +++ /dev/null @@ -1,35 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.credits -=========================== - - -.. automodule:: moviepy.video.tools.credits - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - CreditsClip - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.tools.cuts.FramesMatch.rst.txt b/_sources/reference/reference/moviepy.video.tools.cuts.FramesMatch.rst.txt deleted file mode 100644 index a39e6ee12..000000000 --- a/_sources/reference/reference/moviepy.video.tools.cuts.FramesMatch.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.cuts.FramesMatch -==================================== - -.. currentmodule:: moviepy.video.tools.cuts - -.. autoclass:: FramesMatch - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.cuts.FramesMatches.rst.txt b/_sources/reference/reference/moviepy.video.tools.cuts.FramesMatches.rst.txt deleted file mode 100644 index ee04b9f31..000000000 --- a/_sources/reference/reference/moviepy.video.tools.cuts.FramesMatches.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. 
custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.cuts.FramesMatches -====================================== - -.. currentmodule:: moviepy.video.tools.cuts - -.. autoclass:: FramesMatches - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.cuts.detect_scenes.rst.txt b/_sources/reference/reference/moviepy.video.tools.cuts.detect_scenes.rst.txt deleted file mode 100644 index e983f573d..000000000 --- a/_sources/reference/reference/moviepy.video.tools.cuts.detect_scenes.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.tools.cuts.detect\_scenes -======================================= - -.. currentmodule:: moviepy.video.tools.cuts - -.. autofunction:: detect_scenes \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.cuts.find_video_period.rst.txt b/_sources/reference/reference/moviepy.video.tools.cuts.find_video_period.rst.txt deleted file mode 100644 index 92ca54a45..000000000 --- a/_sources/reference/reference/moviepy.video.tools.cuts.find_video_period.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.tools.cuts.find\_video\_period -============================================ - -.. currentmodule:: moviepy.video.tools.cuts - -.. autofunction:: find_video_period \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.cuts.rst.txt b/_sources/reference/reference/moviepy.video.tools.cuts.rst.txt deleted file mode 100644 index 18274da4c..000000000 --- a/_sources/reference/reference/moviepy.video.tools.cuts.rst.txt +++ /dev/null @@ -1,44 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.cuts -======================== - - -.. automodule:: moviepy.video.tools.cuts - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - FramesMatch - FramesMatches - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - detect_scenes - find_video_period - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.tools.drawing.blit.rst.txt b/_sources/reference/reference/moviepy.video.tools.drawing.blit.rst.txt deleted file mode 100644 index e648dc394..000000000 --- a/_sources/reference/reference/moviepy.video.tools.drawing.blit.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.tools.drawing.blit -================================ - -.. currentmodule:: moviepy.video.tools.drawing - -.. autofunction:: blit \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.drawing.circle.rst.txt b/_sources/reference/reference/moviepy.video.tools.drawing.circle.rst.txt deleted file mode 100644 index 58cbf089c..000000000 --- a/_sources/reference/reference/moviepy.video.tools.drawing.circle.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.tools.drawing.circle -================================== - -.. currentmodule:: moviepy.video.tools.drawing - -.. 
autofunction:: circle \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.drawing.color_gradient.rst.txt b/_sources/reference/reference/moviepy.video.tools.drawing.color_gradient.rst.txt deleted file mode 100644 index 7f193a6b5..000000000 --- a/_sources/reference/reference/moviepy.video.tools.drawing.color_gradient.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.tools.drawing.color\_gradient -=========================================== - -.. currentmodule:: moviepy.video.tools.drawing - -.. autofunction:: color_gradient \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.drawing.color_split.rst.txt b/_sources/reference/reference/moviepy.video.tools.drawing.color_split.rst.txt deleted file mode 100644 index d7203fc32..000000000 --- a/_sources/reference/reference/moviepy.video.tools.drawing.color_split.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.tools.drawing.color\_split -======================================== - -.. currentmodule:: moviepy.video.tools.drawing - -.. autofunction:: color_split \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.drawing.rst.txt b/_sources/reference/reference/moviepy.video.tools.drawing.rst.txt deleted file mode 100644 index 0c2e01bed..000000000 --- a/_sources/reference/reference/moviepy.video.tools.drawing.rst.txt +++ /dev/null @@ -1,37 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.drawing -=========================== - - -.. automodule:: moviepy.video.tools.drawing - - - - - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - blit - circle - color_gradient - color_split - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.tools.interpolators.Interpolator.rst.txt b/_sources/reference/reference/moviepy.video.tools.interpolators.Interpolator.rst.txt deleted file mode 100644 index cd663df2f..000000000 --- a/_sources/reference/reference/moviepy.video.tools.interpolators.Interpolator.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.interpolators.Interpolator -============================================== - -.. currentmodule:: moviepy.video.tools.interpolators - -.. autoclass:: Interpolator - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.interpolators.Trajectory.rst.txt b/_sources/reference/reference/moviepy.video.tools.interpolators.Trajectory.rst.txt deleted file mode 100644 index 0d7ffd0e4..000000000 --- a/_sources/reference/reference/moviepy.video.tools.interpolators.Trajectory.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.interpolators.Trajectory -============================================ - -.. currentmodule:: moviepy.video.tools.interpolators - -.. autoclass:: Trajectory - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.interpolators.rst.txt b/_sources/reference/reference/moviepy.video.tools.interpolators.rst.txt deleted file mode 100644 index 879362237..000000000 --- a/_sources/reference/reference/moviepy.video.tools.interpolators.rst.txt +++ /dev/null @@ -1,36 +0,0 @@ -.. 
custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.interpolators -================================= - - -.. automodule:: moviepy.video.tools.interpolators - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - Interpolator - Trajectory - - - - - - - - - - - - - - - diff --git a/_sources/reference/reference/moviepy.video.tools.rst.txt b/_sources/reference/reference/moviepy.video.tools.rst.txt deleted file mode 100644 index 12fe08f4a..000000000 --- a/_sources/reference/reference/moviepy.video.tools.rst.txt +++ /dev/null @@ -1,50 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools -=================== - - -.. automodule:: moviepy.video.tools - - - - - - - - - - - - - - - - - - - -.. rubric:: Modules - -.. autosummary:: - :toctree: - :template: custom_autosummary/module.rst - :recursive: - - - moviepy.video.tools.credits - - - moviepy.video.tools.cuts - - - moviepy.video.tools.drawing - - - moviepy.video.tools.interpolators - - - moviepy.video.tools.subtitles - - diff --git a/_sources/reference/reference/moviepy.video.tools.subtitles.SubtitlesClip.rst.txt b/_sources/reference/reference/moviepy.video.tools.subtitles.SubtitlesClip.rst.txt deleted file mode 100644 index 39ce716e9..000000000 --- a/_sources/reference/reference/moviepy.video.tools.subtitles.SubtitlesClip.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -.. custom class to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.subtitles.SubtitlesClip -=========================================== - -.. currentmodule:: moviepy.video.tools.subtitles - -.. autoclass:: SubtitlesClip - :members: - - \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.subtitles.file_to_subtitles.rst.txt b/_sources/reference/reference/moviepy.video.tools.subtitles.file_to_subtitles.rst.txt deleted file mode 100644 index bf247ff4e..000000000 --- a/_sources/reference/reference/moviepy.video.tools.subtitles.file_to_subtitles.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -moviepy.video.tools.subtitles.file\_to\_subtitles -================================================= - -.. currentmodule:: moviepy.video.tools.subtitles - -.. autofunction:: file_to_subtitles \ No newline at end of file diff --git a/_sources/reference/reference/moviepy.video.tools.subtitles.rst.txt b/_sources/reference/reference/moviepy.video.tools.subtitles.rst.txt deleted file mode 100644 index 4bd069816..000000000 --- a/_sources/reference/reference/moviepy.video.tools.subtitles.rst.txt +++ /dev/null @@ -1,42 +0,0 @@ -.. custom module to enable complete documentation of every function - see https://stackoverflow.com/a/62613202 - -moviepy.video.tools.subtitles -============================= - - -.. automodule:: moviepy.video.tools.subtitles - - - - - - .. rubric:: Classes - - .. autosummary:: - :toctree: - :template: custom_autosummary/class.rst - - SubtitlesClip - - - - - - - .. rubric:: Functions - - .. autosummary:: - :toctree: - - file_to_subtitles - - - - - - - - - - diff --git a/_sources/user_guide/compositing.rst.txt b/_sources/user_guide/compositing.rst.txt deleted file mode 100644 index 9f395800e..000000000 --- a/_sources/user_guide/compositing.rst.txt +++ /dev/null @@ -1,137 +0,0 @@ -.. 
_compositing:
-
-Compositing multiple clips
-=========================================
-
-Video composition, also known as non-linear editing, consists of mixing and playing several clips together in a new clip. This video is a good example of the kind of compositing you can do with MoviePy:
-
-.. raw:: html
-
- -
-
-.. note::
-    Before starting, note that video clips generally carry an audio track and a mask, which are also clips. When you compose these clips together, the soundtrack and mask of the final clip are automatically generated by putting together the soundtracks and masks of the clips.
-    So most of the time you don't need to worry about mixing the audio and masks.
-
-
-Juxtaposing and concatenating clips
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Two simple ways of putting clips together are to concatenate them (to play them one after the other in a single long clip) or to juxtapose them (to put them side by side in a single larger clip).
-
-Concatenating multiple clips
-"""""""""""""""""""""""""""""""""
-
-Concatenation can be done very easily with the function :py:func:`~moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips`.
-
-.. literalinclude:: /_static/code/user_guide/compositing/concatenate.py
-    :language: python
-
-The ``final_clip`` is a clip that plays the clips 1, 2, and 3 one after the other.
-
-.. note::
-    The clips do not need to be the same size. If they aren't, they will all appear centered in a clip large enough to contain the biggest of them, optionally with a color of your choice to fill the background.
-
-For more info, see :py:func:`~moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips`.
-
-
-Juxtaposing multiple clips
-""""""""""""""""""""""""""""""
-
-Putting multiple clips side by side is done with :py:func:`~moviepy.video.compositing.CompositeVideoClip.clip_array`:
-
-.. literalinclude:: /_static/code/user_guide/compositing/juxtaposing.py
-    :language: python
-
-You obtain a clip which looks like this:
-
-.. figure:: /_static/medias/user_guide/stacked.jpeg
-    :align: center
-
-For more info, see :py:func:`~moviepy.video.compositing.CompositeVideoClip.clip_array`.
-
-
-More complex video compositing
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip` class is the base of all video compositing.
-For example, internally, both :py:func:`~moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips` and :py:func:`~moviepy.video.compositing.CompositeVideoClip.clip_array` create a :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip`.
-
-It provides a very flexible way to compose clips, by playing multiple clips *on top of* each other, in the order they have been passed to :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip`. Here's an example:
-
-.. literalinclude:: /_static/code/user_guide/compositing/CompositeVideoClip.py
-    :language: python
-
-Now ``final_clip`` plays all clips at the same time, with ``clip3`` over ``clip2`` over ``clip1``. This means that, if all clips have the same size, only ``clip3``, which is on top, will be visible in the video...
-Unless ``clip3`` and/or ``clip2`` have masks which hide parts of them.
-
-.. note::
-    Note that by default the composition has the size of its first clip (as it is generally a *background*). But sometimes you will want to make your clips *float* in a bigger composition.
-    To do so, just pass the size of the final composition as the ``size`` parameter of :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip`.
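Since the ``CompositeVideoClip.py`` example file itself is not reproduced in this diff, here is a minimal sketch of such a composition; the file names are placeholders:

.. code-block:: python

    from moviepy import CompositeVideoClip, VideoFileClip

    clip1 = VideoFileClip("example1.mp4")  # placeholder file names
    clip2 = VideoFileClip("example2.mp4")
    clip3 = VideoFileClip("example3.mp4")

    # Clips are drawn in order: clip3 on top of clip2, on top of clip1.
    final_clip = CompositeVideoClip([clip1, clip2, clip3])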
-
-So far we have stacked multiple clips on top of each other, but this is obviously not enough for real video compositing.
-For that, we will need to change when some clips start and stop playing, as well as define the (x, y) position of those clips in the final video.
-
-For more info, see :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip`.
-
-Changing starting and stopping times of clips
-""""""""""""""""""""""""""""""""""""""""""""""""
-
-In a :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip`, each clip starts to play at a time specified by its ``clip.start`` attribute, and will play until ``clip.end``.
-
-So, supposing you want to play ``clip1`` for the first 6 seconds, ``clip2`` 5 seconds after the start of the video, and finally ``clip3`` at the end of ``clip2``, you would do as follows:
-
-.. literalinclude:: /_static/code/user_guide/compositing/with_start.py
-    :language: python
-
-.. note::
-    When working with the timing of your clip, you will frequently want to keep only parts of the original clip.
-    To do so, you should take a look at :py:meth:`~moviepy.Clip.Clip.with_subclip` and :py:meth:`~moviepy.Clip.Clip.with_cutout`.
-
-
-Positioning clips
-""""""""""""""""""
-
-Frequently, you will want a smaller clip to appear on top of a larger one, and decide where it will appear in the composition by setting its position.
-
-You can do so by using the :py:meth:`~moviepy.video.VideoClip.VideoClip.with_position` method. The position is always defined from the top left corner, but you can define it
-in many ways:
-
-.. literalinclude:: /_static/code/user_guide/compositing/with_position.py
-    :language: python
-
-When indicating the position, keep in mind that the ``y`` coordinate has its zero at the top of the picture:
-
-.. figure:: /_static/medias/user_guide/videoWH.jpeg
-
-
-Adding transition effects
-""""""""""""""""""""""""""
-
-The last part of composition is adding transition effects. For example, when a clip starts while another is still playing, it would be nice to fade in the new one instead of having it appear abruptly.
-
-To do so, we can use the transitions offered by MoviePy in :py:mod:`~moviepy.video.compositing.transitions`, like :py:func:`~moviepy.video.compositing.transitions.crossfadein`:
-
-.. literalinclude:: /_static/code/user_guide/compositing/crossfadein.py
-    :language: python
-
-
-MoviePy offers only a few transitions in :py:mod:`~moviepy.video.compositing.transitions`. But technically, transitions are mostly effects applied to the mask of a clip!
-That means you can actually use any of the already existing effects, and use them as transitions by applying them on the mask of your clip.
-
-For more info, see :py:mod:`~moviepy.video.compositing.transitions` and :py:mod:`moviepy.video.fx`.
-
-
-Compositing audio clips
--------------------------
-
-When you mix video clips together, MoviePy will automatically compose their respective audio tracks to form the audio track of the final clip, so you don't need to worry about compositing these tracks yourself.
-
-If you want to make a custom audio track from several audio sources, audio clips can be mixed together like video clips, with :py:class:`~moviepy.audio.AudioClip.CompositeAudioClip` and :py:func:`~moviepy.audio.AudioClip.concatenate_audioclips`:
-
-.. literalinclude:: /_static/code/user_guide/compositing/CompositeAudioClip.py
-    :language: python
-
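The ``CompositeAudioClip.py`` example file is not visible in this diff; a minimal sketch of mixing audio clips (with placeholder file names) might look like this:

.. code-block:: python

    from moviepy import AudioFileClip, CompositeAudioClip, concatenate_audioclips

    music = AudioFileClip("music.mp3")  # placeholder file names
    voice = AudioFileClip("voice.mp3")

    # Play one after the other...
    chained = concatenate_audioclips([music, voice])

    # ...or mix them, with the voice starting 5 seconds in.
    mixed = CompositeAudioClip([music, voice.with_start(5)])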
diff --git a/_sources/user_guide/create_effects.rst.txt b/_sources/user_guide/create_effects.rst.txt
deleted file mode 100644
index 5866ce7d6..000000000
--- a/_sources/user_guide/create_effects.rst.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-.. _create_effects:
-
-Creating your own effects
-========================================================
-
-In addition to the existing effects already offered by MoviePy, we can create our own effects to modify a clip as we want.
-
-
-Why create your own effects?
-------------------------------------
-
-For simple enough tasks, we've seen that we can use filters (see :ref:`modifying#filters`). Though they might be enough for simple tasks, filters are kind of limited:
-
-- They can only access the frame and/or timepoint
-- We cannot pass arguments to them
-- They are hard to maintain and re-use
-
-To allow for more complex and reusable clip modifications, we can create our own custom effects, which we will later apply with :py:func:`~moviepy.Clip.Clip.with_effects`.
-
-For example, imagine we want to add a progress bar to a clip. To do so we will not only need the time and image of the current frame, but also the total duration of the clip.
-We will also probably want to be able to pass parameters defining the appearance of the progress bar, such as color or height. This is a perfect task for an effect!
-
-
-Creating an effect
---------------------
-
-In MoviePy, effects are objects of type :py:class:`moviepy.Effect.Effect`, which is the base ``abstract class`` for all effects (much as :py:class:`~moviepy.Clip.Clip` is the base for all :py:class:`~moviepy.video.VideoClip.VideoClip` and :py:class:`~moviepy.audio.AudioClip.AudioClip`).
-
-So, to create an effect, we will need to inherit from the :py:class:`~moviepy.Effect.Effect` class and do two things:
-
-- Create an ``__init__`` method to be able to receive the parameters of our effect.
-- Implement the inherited :py:meth:`~moviepy.Effect.Effect.apply` method, which must take as an argument the clip we want to modify, and return the modified version.
-
-In the end, your effect will probably use :py:func:`~moviepy.Clip.Clip.time_transform`, :py:func:`~moviepy.Clip.Clip.image_transform`, or :py:func:`~moviepy.Clip.Clip.transform` to actually apply your modifications to the clip.
-The main difference is that, because your filter will be a method or an anonymous function inside your effect class, you will be able to access all the properties of your object from it!
-
-So, let's see how we could create our progress bar effect:
-
-.. literalinclude:: /_static/code/user_guide/effects/custom_effect.py
-    :language: python
-
-.. note::
-    When creating an effect, you frequently have to write boilerplate code for assigning properties on object initialization; ``dataclasses`` is a nice way to limit that.
-
-If you want to create your own effects, in addition to this documentation we strongly encourage you to go and take a look at the existing ones (see :py:mod:`moviepy.video.fx` and :py:mod:`moviepy.audio.fx`) to see how they work and take inspiration.
-
\ No newline at end of file
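The ``custom_effect.py`` example file is not reproduced in this diff. Below is a hedged sketch of what such a progress bar effect could look like; the ``ProgressBar`` name and its ``color``/``height`` parameters are illustrative choices, not the documented implementation:

.. code-block:: python

    from dataclasses import dataclass

    from moviepy.Effect import Effect


    @dataclass
    class ProgressBar(Effect):
        """Illustrative effect drawing a progress bar over the clip."""

        color: tuple = (255, 0, 0)  # RGB color of the bar (assumed parameter)
        height: int = 10            # bar height in pixels (assumed parameter)

        def apply(self, clip):
            # Unlike a plain filter, the effect can read the clip's total
            # duration here (the clip is assumed to have one).
            duration = clip.duration

            def filter(get_frame, t):
                frame = get_frame(t).copy()
                bar_width = int(frame.shape[1] * t / duration)
                frame[-self.height:, :bar_width] = self.color
                return frame

            return clip.transform(filter)

It would then be applied like any other effect, e.g. ``my_clip.with_effects([ProgressBar(color=(0, 255, 0))])``.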
diff --git a/_sources/user_guide/index.rst.txt b/_sources/user_guide/index.rst.txt
deleted file mode 100644
index bb4b162c5..000000000
--- a/_sources/user_guide/index.rst.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-.. _user_guide:
-
-
-The MoviePy User Guide
-------------------------------
-
-The User Guide covers all of MoviePy's main concepts grouped by tasks (loading, editing, composing, rendering), with a presentation of the different concepts/elements relative to each task, as well as short code examples.
-
-It is a good place for users wishing to understand one of these aspects more precisely and to discover the different MoviePy elements relative to it.
-
-For users wanting a quick overview of how to use MoviePy, a better place to start is the :ref:`getting_started` section, and more specifically the :ref:`moviepy_10_minutes` tutorial.
-
-For a full overview of MoviePy, see the :ref:`reference_manual`.
-
-.. toctree::
-    :maxdepth: 1
-
-    loading
-    modifying
-    create_effects
-    compositing
-    rendering
diff --git a/_sources/user_guide/loading.rst.txt b/_sources/user_guide/loading.rst.txt
deleted file mode 100644
index 2a77b6bb0..000000000
--- a/_sources/user_guide/loading.rst.txt
+++ /dev/null
@@ -1,278 +0,0 @@
-.. _loading:
-
-Loading resources as clips
-===================================
-
-The first step for making a video with MoviePy is to load the resources you wish to include in the final video.
-
-In this section we present the different sorts of clips and how to load them.
-For information on modifying a clip, see :ref:`modifying`. For how to put clips together, see :ref:`compositing`. And for how to see/save them, see :ref:`rendering` (we will usually save them in the examples, but we won't explain that here).
-
-There are a lot of different resources you can use with MoviePy, and you will load those different resources with different subtypes of :py:class:`~moviepy.Clip.Clip`: more precisely, :py:class:`~moviepy.audio.AudioClip.AudioClip` for any audio element, or :py:class:`~moviepy.video.VideoClip.VideoClip` for any visual element.
-
-The following code summarizes the base clips that you can create with MoviePy:
-
-.. literalinclude:: /_static/code/user_guide/loading/loading.py
-    :language: python
-
-
-The best way to understand all these clips more thoroughly is to read the full documentation for each in the :ref:`reference_manual`.
-
-
-Releasing resources by closing a clip
----------------------------------------
-
-When you create some types of clip instances - e.g. ``VideoFileClip`` or ``AudioFileClip`` - MoviePy creates a subprocess and locks the file. In order to release those resources when you are finished, you should call the ``close()`` method.
-
-This is more important for more complex applications and is particularly important when running on Windows. While Python's garbage collector should eventually clean up the resources for you, closing them makes them available earlier.
-
-However, if you close a clip too early, methods on the clip (and any clips derived from it) become unsafe.
-
-So, the rules of thumb are:
-
-    * Call ``close()`` on any clip that you **construct** once you have finished using it and have also finished using any clip that was derived from it.
-    * Even if you close a :py:class:`~moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip` instance, you still need to close the clips it was created from.
-    * Otherwise, if you have a clip that was created by deriving it from another clip (e.g. by calling ``with_mask()``), then generally you shouldn't close it. Closing the original clip will also close the copy.
-
-Clips act as `context managers `_. This means you
-can use them with a ``with`` statement, and they will automatically be closed at the end of the block, even if there is
-an exception.
-
-.. literalinclude:: /_static/code/user_guide/loading/closing.py
-    :language: python
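The ``closing.py`` example is not reproduced in this diff; a minimal sketch (with a placeholder file name) might be:

.. code-block:: python

    from moviepy import VideoFileClip

    # The clip is closed automatically at the end of the block,
    # even if an exception is raised inside it.
    with VideoFileClip("example.mp4") as clip:
        print(clip.duration)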
-
-
-Categories of video clips
--------------------------
-
-Video clips are the building blocks of longer videos. Technically, they are clips with a ``clip.get_frame(t)`` method which outputs an ``HxWx3`` numpy array representing the frame of the clip at time ``t``.
-
-There are two main types of video clips:
-
-* animated clips (made with :py:class:`~moviepy.video.VideoClip.VideoFileClip`, :py:class:`~moviepy.video.VideoClip.VideoClip` and :py:class:`~moviepy.video.io.ImageSequenceClip.ImageSequenceClip`), which will always have a duration.
-* unanimated clips (made with :py:class:`~moviepy.video.VideoClip.ImageClip`, :py:class:`~moviepy.video.VideoClip.TextClip` and :py:class:`~moviepy.video.VideoClip.ColorClip`), which show the same picture for an a-priori infinite duration.
-
-There are also special video clips called masks, which belong to the categories above but output greyscale frames indicating which parts of another clip are visible or not.
-
-A video clip can carry around an audio clip (:py:class:`~moviepy.audio.AudioClip.AudioClip`) in :py:attr:`~moviepy.video.VideoClip.VideoClip.audio`, which is its *soundtrack*, and a mask clip in :py:attr:`~moviepy.video.VideoClip.VideoClip.mask`.
-
-Animated clips
-~~~~~~~~~~~~~~~
-
-These are clips whose image changes in time, and which have a duration and a number of frames per second.
-
-VideoClip
-""""""""""
-
-:py:class:`~moviepy.video.VideoClip.VideoClip` is the base class for all the other video clips in MoviePy. If all you want is to edit video files, you will never need it. This class is practical when you want to make animations from frames that are generated by another library.
-All you need is to define a function ``make_frame(t)`` which returns an ``HxWx3`` numpy array (of 8-bit integers) representing the frame at time ``t``.
-
-Here is an example where we create a pulsating red circle with the graphical library `pillow `_.
-
-.. literalinclude:: /_static/code/user_guide/loading/VideoClip.py
-    :language: python
-
-Resulting in this:
-
-.. image:: /_static/medias/user_guide/circle.gif
-    :width: 128 px
-    :align: center
-    :alt: A pulsating red circle on black background.
-
-
-.. note::
-    Clips that are made with a ``make_frame`` do not have an explicit frame rate nor duration by default, so you must provide a duration at clip creation and a frame rate (``fps``, frames per second) for :py:meth:`~moviepy.video.VideoClip.VideoClip.write_gif` and :py:meth:`~moviepy.video.VideoClip.VideoClip.write_videofile`, and more generally for any method that requires iterating through the frames.
-
-For more, see :py:class:`~moviepy.video.VideoClip.VideoClip`.
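As the ``VideoClip.py`` example is not visible in this diff, here is a minimal sketch of a ``make_frame``-based clip, using plain numpy instead of pillow to keep it short:

.. code-block:: python

    import numpy as np
    from moviepy import VideoClip

    def make_frame(t):
        """Return a 64x64 RGB frame whose red level pulses over time."""
        frame = np.zeros((64, 64, 3), dtype=np.uint8)
        frame[:, :, 0] = int(127 + 127 * np.sin(2 * np.pi * t))
        return frame

    clip = VideoClip(make_frame, duration=2)  # duration given explicitly
    clip.write_gif("pulse.gif", fps=24)       # fps needed for rendering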
-
-
-VideoFileClip
-"""""""""""""""
-
-A :py:class:`~moviepy.video.io.VideoFileClip.VideoFileClip` is a clip read from a video file (most formats are supported) or a GIF file. This is probably one of the most used objects! You load the video as follows:
-
-.. literalinclude:: /_static/code/user_guide/loading/VideoFileClip.py
-    :language: python
-
-.. note::
-    These clips will have ``fps`` (frames per second) and ``duration`` attributes, which will be transmitted if you make small modifications of the clip, and will be used by default in :py:meth:`~moviepy.video.VideoClip.VideoClip.write_gif`, :py:meth:`~moviepy.video.VideoClip.VideoClip.write_videofile`, etc.
-
-For more, see :py:class:`~moviepy.video.io.VideoFileClip.VideoFileClip`.
-
-
-ImageSequenceClip
-""""""""""""""""""
-
-An :py:class:`~moviepy.video.io.ImageSequenceClip.ImageSequenceClip` is a clip made from a series of images:
-
-.. literalinclude:: /_static/code/user_guide/loading/ImageSequenceClip.py
-    :language: python
-
-When creating an image sequence, ``sequence`` can be either a list of image names (that will be *played* in the provided order), a folder name (played in alphanumerical order), or a list of frames (numpy arrays), obtained for instance from other clips.
-
-.. warning::
-    All the images in the list/folder/frames must be of the same size, or an exception will be raised.
-
-For more, see :py:class:`~moviepy.video.io.ImageSequenceClip.ImageSequenceClip`.
-
-
-DataVideoClip
-""""""""""""""""""
-
-:py:class:`~moviepy.video.io.VideoClip.DataVideoClip` is a video clip which takes a list of datasets and a callback function, and makes each frame by iterating over the dataset and invoking the callback function with the current data as first argument.
-
-You will probably never use this. But if you do, think of it like a :py:class:`~moviepy.video.VideoClip.VideoClip`, where you make frames not based on time,
-but based on each entry of a data list.
-
-.. literalinclude:: /_static/code/user_guide/loading/DataVideoClip.py
-    :language: python
-
-For more, see :py:class:`~moviepy.video.io.VideoClip.DataVideoClip`.
-
-
-UpdatedVideoClip
-""""""""""""""""""
-
-.. warning::
-    This is really advanced usage; you will probably never need it. If you do, please go read the code.
-
-:py:class:`~moviepy.video.io.VideoClip.UpdatedVideoClip` is a video clip whose ``make_frame`` requires some objects to be updated before we can compute it.
-
-This is particularly practical in science, where some algorithm needs to make some steps before a new frame can be generated, or when trying to make a video based on a live exterior context.
-
-When you use this, you pass a world object to it. A world object is an object that respects these 3 rules:
-
-#. It has a ``clip_t`` property, indicating the current world time.
-#. It has an ``update()`` method, that will update the world state and is responsible for increasing ``clip_t`` when a new frame can be drawn.
-#. It has a ``to_frame()`` method, that will render a frame based on the world's current state.
-
-On a :py:meth:`~moviepy.video.io.VideoClip.UpdatedVideoClip.get_frame` call, your :py:class:`~moviepy.video.io.VideoClip.UpdatedVideoClip` will try to update the world until ``world.clip_t`` is greater than or equal to the frame time, then it will call ``world.to_frame()``.
-
-.. literalinclude:: /_static/code/user_guide/loading/UpdatedVideoClip.py
-    :language: python
-
-
-
-Unanimated clips
-~~~~~~~~~~~~~~~~
-
-These are clips whose image will, at least before modifications, stay the same. By default they have no duration nor fps, meaning you will need to define those if you attempt an operation that needs such information (for example rendering).
-
-
-ImageClip
-""""""""""
-
-:py:class:`~moviepy.video.VideoClip.ImageClip` is the base class for all unanimated clips; it's a video clip that always displays the same image. Along with :py:class:`~moviepy.video.io.VideoFileClip.VideoFileClip`, it's one of the most used kinds of clips.
-You can create one as follows:
-
-.. literalinclude:: /_static/code/user_guide/loading/ImageClip.py
-    :language: python
-
-For more, see :py:class:`~moviepy.video.VideoClip.ImageClip`.
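A minimal sketch of what the ``ImageClip.py`` example might contain (the file name is a placeholder):

.. code-block:: python

    from moviepy import ImageClip

    # Show the same image; a duration must be set before rendering.
    clip = ImageClip("example.png").with_duration(2)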
-
-
-TextClip
-"""""""""""""""
-
-A :py:class:`~moviepy.video.VideoClip.TextClip` is a clip that will turn a text string into an image clip.
-
-:py:class:`~moviepy.video.VideoClip.TextClip` accepts many parameters, letting you configure the appearance of the text, such as font and font size,
-color, interlining, text alignment, etc.
-
-The font you want to use must be an `OpenType font `_, and you will set it by passing the path to the font file.
-
-Here are a few examples of using :py:class:`~moviepy.video.VideoClip.TextClip`:
-
-.. literalinclude:: /_static/code/user_guide/loading/TextClip.py
-    :language: python
-
-.. note::
-    The ``method`` parameter lets you define whether text should be written on one line and overflow if too long (``label``) or be automatically broken into lines (``caption``).
-
-For a more detailed explanation of all the parameters, see :py:class:`~moviepy.video.VideoClip.TextClip`.
-
-
-ColorClip
-"""""""""""""""
-
-A :py:class:`~moviepy.video.VideoClip.ColorClip` is a clip that will return an image of only one color. It is sometimes useful when doing compositing (see :ref:`compositing`).
-
-.. literalinclude:: /_static/code/user_guide/loading/ColorClip.py
-    :language: python
-
-For more, see :py:class:`~moviepy.video.VideoClip.ColorClip`.
-
-
-.. _loading#masks:
-
-Mask clips
-~~~~~~~~~~~~~~
-
-Masks are a special kind of :py:class:`~moviepy.video.VideoClip.VideoClip` with the property ``is_mask`` set to ``True``. They can be attached to any other kind of :py:class:`~moviepy.video.VideoClip.VideoClip` through the method :py:meth:`~moviepy.video.VideoClip.VideoClip.with_mask`.
-
-When a clip has a mask attached to it, this mask will indicate which pixels will be visible when the clip is composed with other clips (see :ref:`compositing`). Masks are also used to define transparency when you export the clip as a GIF file or as a PNG.
-
-The fundamental difference between masks and standard clips is that standard clips output frames with 3 components (R-G-B) per pixel, each between 0 and 255, while a mask has just one component per pixel, between 0 and 1 (1 indicating a fully visible pixel and 0 a transparent pixel). Seen otherwise, a mask is always in greyscale.
-
-When you create or load a clip that you will use as a mask, you need to declare it. You can then attach it to a clip with the same dimensions:
-
-.. literalinclude:: /_static/code/user_guide/loading/masks.py
-    :language: python
-
-.. note::
-    In the case of video and image files, if these are not already black and white, they will be converted automatically.
-
-    Also, when you load an image with an *alpha layer*, like a PNG, MoviePy will use this layer as a mask, unless you pass ``transparent=False``.
-
-
-Any video clip can be turned into a mask with :py:meth:`~moviepy.video.VideoClip.VideoClip.to_mask`, and a mask can be turned into a standard RGB video clip with :py:meth:`~moviepy.video.VideoClip.VideoClip.to_RGB()`.
-
-Masks are treated differently by many methods (because their frames are different), but at the core they are :py:class:`~moviepy.video.VideoClip.VideoClip` objects, so you can do with them everything you can do with a video clip: modify, cut, apply effects, save, etc.
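The ``masks.py`` example is not reproduced here; a minimal sketch of declaring and attaching a mask (with placeholder file names) could be:

.. code-block:: python

    from moviepy import ImageClip, VideoFileClip

    # Declare the clip as a mask with is_mask=True (frame values in [0, 1]).
    mask = ImageClip("mask.png", is_mask=True)

    # Attach it to a clip of the same dimensions.
    clip = VideoFileClip("example.mp4").with_mask(mask)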
-
-
-Using audio elements with audio clips
---------------------------------------
-
-In addition to :py:class:`~moviepy.video.VideoClip.VideoClip` for visuals, you can use audio elements, like an audio file, using the :py:class:`~moviepy.audio.AudioClip.AudioClip` class.
-
-Both are quite similar, except that the :py:class:`~moviepy.audio.AudioClip.AudioClip` method :py:meth:`~moviepy.audio.AudioClip.AudioClip.get_frame` returns a numpy array of size ``Nx1`` for mono, and size ``Nx2`` for stereo.
-
-
-AudioClip
-~~~~~~~~~~
-
-:py:class:`~moviepy.audio.AudioClip.AudioClip` is the base class for all audio clips. If all you want is to edit audio files, you will never need it.
-
-All you need is to define a function ``make_frame(t)`` which returns an ``Nx1`` or ``Nx2`` numpy array representing the sound at time ``t``.
-
-.. literalinclude:: /_static/code/user_guide/loading/AudioClip.py
-    :language: python
-
-For more, see :py:class:`~moviepy.audio.AudioClip.AudioClip`.
-
-
-AudioFileClip
-~~~~~~~~~~~~~~~~~~~~
-
-:py:class:`~moviepy.audio.io.AudioFileClip.AudioFileClip` is used to load an audio file; this is probably the only kind of audio clip you will use.
-
-You simply pass it the file you want to load:
-
-.. literalinclude:: /_static/code/user_guide/loading/AudioFileClip.py
-    :language: python
-
-For more, see :py:class:`~moviepy.audio.io.AudioFileClip.AudioFileClip`.
-
-
-AudioArrayClip
-~~~~~~~~~~~~~~~~~~~~
-
-:py:class:`~moviepy.audio.AudioClip.AudioArrayClip` is used to turn an array representing a sound into an audio clip. You will probably never use it, unless you need to use the result of some third-party library without using a temporary file.
-
-You need to provide a numpy array representing the sound (of size ``Nx1`` for mono, ``Nx2`` for stereo), and the number of fps, indicating the speed at which the sound is supposed to be played.
-
-.. literalinclude:: /_static/code/user_guide/loading/AudioArrayClip.py
-    :language: python
-
-For more, see :py:class:`~moviepy.audio.AudioClip.AudioArrayClip`.
\ No newline at end of file
diff --git a/_sources/user_guide/modifying.rst.txt b/_sources/user_guide/modifying.rst.txt
deleted file mode 100644
index be98470dc..000000000
--- a/_sources/user_guide/modifying.rst.txt
+++ /dev/null
@@ -1,161 +0,0 @@
-.. _modifying:
-
-Modifying clips and applying effects
-========================================
-
-Of course, once you have loaded a :py:class:`~moviepy.Clip.Clip`, the next step will be to modify it so you can integrate it in your final video.
-
-To modify a clip, there are three main courses of action:
-
-    * The built-in methods of :py:class:`~moviepy.video.VideoClip.VideoClip` or :py:class:`~moviepy.audio.AudioClip.AudioClip` modifying the properties of the object.
-    * The already-implemented effects of MoviePy you can apply on clips, usually affecting the clip by applying filters on each frame of the clip at rendering time.
-    * The transformation filters that you can apply using :py:func:`~moviepy.Clip.Clip.transform` and :py:func:`~moviepy.Clip.Clip.time_transform`.
-
-
-How are modifications applied to a clip?
--------------------------------------------------------
-
-Clip copy during modification
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The first thing you must know is that when modifying a clip, MoviePy **will never modify that clip directly**.
-Instead it will return **a modified copy of the original** and leave the original untouched. This is known as out-of-place (as opposed to in-place) behavior.
-
-To illustrate:
-
-.. literalinclude:: /_static/code/user_guide/effects/modify_copy_example.py
-    :language: python
-
-This is an important point to understand, because it is one of the most recurrent sources of bugs for newcomers.
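A minimal sketch of this out-of-place behavior (the ``modify_copy_example.py`` file itself is not shown in this diff; the file name is a placeholder):

.. code-block:: python

    from moviepy import VideoFileClip

    clip = VideoFileClip("example.mp4")
    shorter = clip.with_subclip(0, 5)  # returns a modified copy

    # The original clip is left untouched.
    print(clip.duration)     # full duration of the file
    print(shorter.duration)  # 5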
-
-
-Memory consumption of effects and modifications
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-When applying an effect or modification, MoviePy does not immediately apply it to all the frames of the clip, but only to the first frame: all the other frames will only be modified when required (that is, when you write the whole clip to a file or when you preview it).
-
-This means that creating a new clip is neither time- nor memory-hungry; all the computation happens during the final rendering.
-
-
-Time representations in MoviePy
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Many methods that we will see accept a duration or timepoint as argument. For instance :py:meth:`clip.with_subclip(t_start, t_end) `, which cuts the clip between two timepoints.
-
-MoviePy usually accepts durations and timepoints as either:
-
-* a number of seconds as a ``float``,
-* a ``tuple`` with ``(minutes, seconds)`` or ``(hours, minutes, seconds)``,
-* a ``string`` such as ``'00:03:50.54'``.
-
-Also, you can usually provide negative times, indicating a time from the end of the clip. For example, ``clip.with_subclip(-20, -10)`` cuts the clip between 20s before the end and 10s before the end.
-
-
-Modify a clip using the ``with_*`` methods
--------------------------------------------------------
-
-The first way to modify a clip is by modifying the internal properties of your object, thus modifying its behavior.
-
-These methods usually start with the prefix ``with_`` or ``without_``, indicating that they will return a copy of the clip with the properties modified.
-
-So, you may write something like:
-
-.. literalinclude:: /_static/code/user_guide/effects/using_with_methods.py
-    :language: python
-
-In addition to the ``with_*`` methods, a handful of very common methods are also accessible under shorter names; these are:
-
-- :py:meth:`~moviepy.video.VideoClip.VideoClip.resized`
-- :py:meth:`~moviepy.video.VideoClip.VideoClip.crop`
-- :py:meth:`~moviepy.video.VideoClip.VideoClip.rotate`
-
-For a list of all those methods, see :py:class:`~moviepy.Clip.Clip` and :py:class:`~moviepy.video.VideoClip.VideoClip`.
-
-
-.. _modifying#effects:
-
-Modify a clip using effects
----------------------------------
-
-The second way to modify a clip is by using effects that will modify the frames of the clip (which internally are no more than `numpy arrays `_) by applying some sort of function on them.
-
-MoviePy comes with many effects implemented in :py:mod:`moviepy.video.fx` for visual effects and :py:mod:`moviepy.audio.fx` for audio effects.
-For practicality, these two modules are loaded in MoviePy as ``vfx`` and ``afx``, letting you import them as ``from moviepy import vfx, afx``.
-
-To use those effects, you simply need to instantiate them as objects and apply them on your :py:class:`~moviepy.Clip.Clip` using the method :py:meth:`~moviepy.Clip.Clip.with_effects`, with a list of the :py:class:`~moviepy.Effect.Effect` objects you want to apply.
-
-For convenience the effects are also dynamically added as methods of the :py:class:`~moviepy.video.VideoClip.VideoClip` and :py:class:`~moviepy.video.AudioClip.AudioClip` classes at runtime, letting you call them as simple methods of your clip.
-
-So, you may write something like:
-
-.. literalinclude:: /_static/code/user_guide/effects/using_effects.py
-    :language: python
-
-.. note::
-    MoviePy effects are automatically applied to both the sound and the mask of the clip if it is relevant, so that you don't have to worry about modifying these.
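As the ``using_effects.py`` file is not visible here, a hedged sketch (the particular effects chosen are illustrative):

.. code-block:: python

    from moviepy import VideoFileClip, afx, vfx

    clip = VideoFileClip("example.mp4")  # placeholder file name
    clip = clip.with_effects(
        [
            vfx.MultiplySpeed(2),     # play twice as fast
            afx.MultiplyVolume(0.5),  # halve the volume
        ]
    )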
-
-For a list of those effects, see :py:mod:`moviepy.video.fx` and :py:mod:`moviepy.audio.fx`.
-
-In addition to the effects already provided by MoviePy, you can obviously :ref:`create_effects` and use them the same way.
-
-.. _modifying#filters:
-
-Modify a clip's appearance and timing using filters
-----------------------------------------------------------
-
-In addition to modifying a clip's properties and using effects, you can also modify the appearance or timing of a clip by using your own custom *filters* with :py:func:`~moviepy.Clip.Clip.time_transform`, :py:func:`~moviepy.Clip.Clip.image_transform`, and more generally with :py:func:`~moviepy.Clip.Clip.transform`.
-
-All those methods work by taking as first parameter a callback function that will receive either a clip frame, a timepoint, or both, and return a modified version of them.
-
-Modify only the timing of a Clip
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can change the timeline of the clip with :py:meth:`time_transform(your_filter) `.
-Where ``your_filter`` is a callback function taking the clip time as a parameter and returning a new time:
-
-.. literalinclude:: /_static/code/user_guide/effects/time_transform.py
-    :language: python
-
-Now the clip ``modified_clip1`` plays three times faster than ``my_clip``, while ``modified_clip2`` oscillates between 00:00:00 and 00:00:02 of ``my_clip``. Note that in the last case you have created a clip of infinite duration (which is not a problem for the moment).
-
-.. note::
-    By default :py:func:`~moviepy.Clip.Clip.time_transform` will only modify the clip's main frames, without modifying the clip's audio or mask for :py:class:`~moviepy.video.VideoClip.VideoClip`.
-
-    If you wish to also modify audio and/or mask, you can provide the parameter ``apply_to`` with either ``'audio'``, ``'mask'``, or ``['audio', 'mask']``.
-
-
-Modifying only the appearance of a Clip
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-For :py:class:`~moviepy.video.VideoClip.VideoClip`, you can change the appearance of the clip with :py:meth:`image_transform(your_filter) `.
-Where ``your_filter`` is a callback function, taking a clip frame (a numpy array) as a parameter and returning the transformed frame:
-
-.. literalinclude:: /_static/code/user_guide/effects/image_transform.py
-    :language: python
-
-Now the clip ``modified_clip1`` will have its green and blue channels inverted.
-
-.. note::
-    You can define whether the transformation should be applied to audio and mask, the same as for :py:func:`~moviepy.Clip.Clip.time_transform`.
-
-.. note::
-    Sometimes you need to treat clip frames and mask frames in a different way. To distinguish between the two, you can always look at their shape: clips are ``H*W*3``, and masks ``H*W``.
-
-
-Modifying both the appearance and the timing of a Clip
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Finally, you may want to process the clip by taking into account both the time and the frame picture, for example to apply visual effects varying with time.
-This is possible with the method :py:meth:`transform(your_filter) `.
-Where ``your_filter`` is a callback function taking two parameters and returning a new frame picture: the first argument is a ``get_frame`` method (i.e. a function ``get_frame(time)`` which, given a time, returns the clip's frame at that time), and the second argument is the time.
-
-.. literalinclude:: /_static/code/user_guide/effects/transform.py
-    :language: python
-
-This will scroll down the clip, with a constant height of 360 pixels.
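Since ``transform.py`` is not reproduced here, a sketch of such a scrolling filter (the window height and scrolling speed are illustrative) might be:

.. code-block:: python

    from moviepy import VideoFileClip

    clip = VideoFileClip("example.mp4")  # placeholder file name

    def scroll(get_frame, t):
        """At time t, return a 360-pixel-high window, moved down over time."""
        frame = get_frame(t)
        top = int(50 * t)  # assumed speed of 50 pixels per second; the source
                           # must be tall enough for the whole scroll
        return frame[top:top + 360, :]

    scrolled = clip.transform(scroll)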
-
-.. note::
-    You can define whether the transformation should be applied to audio and mask, the same as for :py:func:`~moviepy.Clip.Clip.time_transform`.
-
-.. note::
-    When programming a new effect, whenever possible, prefer using ``time_transform`` and ``image_transform`` instead of ``transform``.
-    The reason is that, though they all internally rely on ``transform``, when these effects are applied to ``ImageClip`` objects, MoviePy will recognize that they only need to be applied once instead of on each frame, resulting in faster renderings.
-
-To keep things simple, we have only addressed the case of :py:class:`~moviepy.video.VideoClip.VideoClip`, but know that the same principle applies to :py:class:`~moviepy.audio.AudioClip.AudioClip`, except that instead of a picture frame, you will have an audio frame, which is also a numpy array.
\ No newline at end of file
diff --git a/_sources/user_guide/rendering.rst.txt b/_sources/user_guide/rendering.rst.txt
deleted file mode 100644
index 535fd444b..000000000
--- a/_sources/user_guide/rendering.rst.txt
+++ /dev/null
@@ -1,145 +0,0 @@
-.. _rendering:
-
-Previewing and saving video clips
-====================================
-
-Once you are done working with your clips, the last step is to export the result into a video/image file, or sometimes simply to preview it in order to verify that everything is working as expected.
-
-Previewing a clip
-"""""""""""""""""""""
-
-When you are working with a clip, you will frequently need to have a peek at what your clip looks like, either to verify that everything is working as intended, or to check how things look.
-
-To do so you could render your entire clip into a file, but that's a pretty long task, and you only need a quick look, so a better solution exists: previewing.
-
-Preview a clip as a video
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. warning::
-    You must have ``FFPLAY`` installed and accessible to MoviePy to be able to use :py:func:`~moviepy.video.io.preview.preview`.
-    If you're not sure, take a look at :ref:`install#binaries`.
-
-The first thing you can do is to preview your clip as a video, by calling the method :py:func:`~moviepy.video.io.preview.preview` on your clip:
-
-.. literalinclude:: /_static/code/user_guide/rendering/preview.py
-    :language: python
-
-You will probably frequently want to preview only a small portion of your clip; though ``preview`` does not offer such capabilities, you can easily emulate such behavior by using :py:meth:`~moviepy.Clip.Clip.with_subclip`.
-
-.. note::
-    It is quite frequent for a clip preview to be out of sync, or to play slower than it should. This means that your computer is not powerful enough to render the clip in real time.
-
-    Don't hesitate to play with the options of preview: for instance, lower the fps of the sound (11000 Hz is still fine) and the video. Also, downsizing your video with resize can help.
-
-For more info, see :py:func:`~moviepy.video.io.preview.preview`.
-
-.. note::
-    A quite similar function is also available for :py:func:`~moviepy.audio.AudioClip.AudioClip`, see :py:func:`~moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview`.
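A minimal sketch of previewing (the ``preview.py`` example is not shown here; the file name and fps value are placeholders):

.. code-block:: python

    from moviepy import VideoFileClip

    clip = VideoFileClip("example.mp4")

    # Preview only the first 2 seconds, at a reduced fps to lighten the load.
    clip.with_subclip(0, 2).preview(fps=15)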
-
-
-Preview just one frame of a clip
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In a lot of situations, you don't really need to preview your whole clip; seeing only one frame is enough to see how it looks and to make sure everything goes as expected.
-
-To do so, you can use the method :py:func:`~moviepy.video.io.preview.show` on your clip, passing the frame time as an argument:
-
-.. literalinclude:: /_static/code/user_guide/rendering/show.py
-    :language: python
-
-Contrary to video previewing, ``show`` does not require ``ffplay``, but uses the ``pillow`` ``Image.show`` function.
-
-For more info, see :py:func:`~moviepy.video.io.preview.show`.
-
-
-Showing a clip in Jupyter Notebook
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you work with a `Jupyter Notebook `_, it can be very practical to display your clip in the notebook. To do so, you can use the method :py:func:`~moviepy.video.io.display_in_notebook.display_in_notebook` on your clip.
-
-.. image:: /_static/medias/user_guide/demo_preview.jpeg
-    :width: 500px
-    :align: center
-
-With :py:func:`~moviepy.video.io.display_in_notebook.display_in_notebook` you can embed videos, images and sounds, either from a file or directly from a clip:
-
-.. literalinclude:: /_static/code/user_guide/rendering/display_in_notebook.py
-    :language: python
-
-
-.. warning::
-    Know that :py:func:`~moviepy.video.io.display_in_notebook.display_in_notebook` will only work if it is on the last line of the notebook cell.
-
-    Also, note that :py:func:`~moviepy.video.io.display_in_notebook.display_in_notebook` actually embeds the clips physically in your notebook. The advantage is that you can move the notebook or put it online and the videos will work.
-    The drawback is that the file size of the notebook can become very large. Depending on your browser, re-computing and displaying a video many times can take up space in the cache and the RAM (it will only be a problem for intensive uses).
-    Restarting your browser solves the problem.
-
-
-For more info, see :py:func:`~moviepy.video.io.display_in_notebook.display_in_notebook`.
-
-
-Save your clip into a file
-""""""""""""""""""""""""""""""""""""""""
-
-Once you are satisfied with how your clip looks, you can save it into a file, a step known in video editing as rendering. MoviePy offers various ways to save your clip.
-
-Video files (.mp4, .webm, .ogv...)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The obvious first choice is to write your clip to a video file, which you can do with :py:meth:`~moviepy.video.VideoClip.VideoClip.write_videofile`:
-
-.. literalinclude:: /_static/code/user_guide/rendering/write_videofile.py
-    :language: python
-
-MoviePy can find a default codec name for the most common file extensions. If you want to use exotic formats, or if you are not happy with the defaults, you can provide the codec with ``codec='mpeg4'`` for instance.
-
-There are many, many options when you are writing a video (bitrate, parameters of the audio writing, file size optimization, number of processors to use, etc.), and we will not go into detail on each. So, for more info, see :py:meth:`~moviepy.video.VideoClip.VideoClip.write_videofile`.
-
-.. note::
-    Though you are encouraged to play with the settings of ``write_videofile``, know that lowering the optimization preset or increasing the number of threads will not necessarily
-    improve the rendering time, as the bottleneck may be in MoviePy's computation of each frame and not in the ffmpeg encoding.
-
-    Also, know that it is possible to pass additional parameters to the ffmpeg command line invoked by MoviePy by using the ``ffmpeg_params`` argument.
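A minimal sketch of writing a clip to disk (the ``write_videofile.py`` example is not shown here; file names are placeholders):

.. code-block:: python

    from moviepy import VideoFileClip

    clip = VideoFileClip("example.mp4")

    clip.write_videofile("result.mp4")                 # codec inferred from extension
    clip.write_videofile("result.avi", codec="mpeg4")  # or forced explicitly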
-
-Sometimes it is impossible for MoviePy to guess the ``duration`` attribute of the clip (keep in mind that some clips, like ImageClips displaying a picture, have *a priori* an infinite duration). Then, the ``duration`` must be set manually with :py:meth:`~moviepy.Clip.Clip.with_duration`:
-
-.. literalinclude:: /_static/code/user_guide/rendering/write_videofile_duration.py
-    :language: python
-
-
-.. note::
-    A quite similar function is also available for :py:func:`~moviepy.audio.AudioClip.AudioClip`, see :py:func:`~moviepy.audio.io.AudioClip.write_audiofile`.
-
-
-Export a single frame of the clip
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-As for previewing, sometimes you will need to export only one frame of a clip, for example to create the preview image of a video. You can do so with :py:meth:`~moviepy.video.VideoClip.VideoClip.save_frame`:
-
-.. literalinclude:: /_static/code/user_guide/rendering/save_frame.py
-    :language: python
-
-For more info, see :py:func:`~moviepy.video.VideoClip.VideoClip.save_frame`.
-
-
-Animated GIFs
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In addition to writing video files, MoviePy also lets you write GIF files with :py:meth:`~moviepy.video.VideoClip.VideoClip.write_gif`:
-
-.. literalinclude:: /_static/code/user_guide/rendering/write_gif.py
-    :language: python
-
-
-For more info, see :py:func:`~moviepy.video.VideoClip.VideoClip.write_gif`.
-
-
-Export the whole clip as images in a directory
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Lastly, you may wish to export an entire clip as an image sequence (multiple images in one directory, one image per frame). You can do so with the function :py:meth:`~moviepy.video.VideoClip.VideoClip.write_images_sequence`:
-
-.. literalinclude:: /_static/code/user_guide/rendering/write_images_sequence.py
-    :language: python
-
-For more info, see :py:func:`~moviepy.video.VideoClip.VideoClip.write_images_sequence`.
diff --git a/_sphinx_design_static/design-tabs.js b/_sphinx_design_static/design-tabs.js
deleted file mode 100644
index b25bd6a4f..000000000
--- a/_sphinx_design_static/design-tabs.js
+++ /dev/null
@@ -1,101 +0,0 @@
-// @ts-check
-
-// Extra JS capability for selected tabs to be synced
-// The selection is stored in local storage so that it persists across page loads.
-
-/**
- * @type {Record}
- */
-let sd_id_to_elements = {};
-const storageKeyPrefix = "sphinx-design-tab-id-";
-
-/**
- * Create a key for a tab element.
- * @param {HTMLElement} el - The tab element.
- * @returns {[string, string, string] | null} - The key.
- *
- */
-function create_key(el) {
-  let syncId = el.getAttribute("data-sync-id");
-  let syncGroup = el.getAttribute("data-sync-group");
-  if (!syncId || !syncGroup) return null;
-  return [syncGroup, syncId, syncGroup + "--" + syncId];
-}
-
-/**
- * Initialize the tab selection.
- * - */ -function ready() { - // Find all tabs with sync data - - /** @type {string[]} */ - let groups = []; - - document.querySelectorAll(".sd-tab-label").forEach((label) => { - if (label instanceof HTMLElement) { - let data = create_key(label); - if (data) { - let [group, id, key] = data; - - // add click event listener - // @ts-ignore - label.onclick = onSDLabelClick; - - // store map of key to elements - if (!sd_id_to_elements[key]) { - sd_id_to_elements[key] = []; - } - sd_id_to_elements[key].push(label); - - if (groups.indexOf(group) === -1) { - groups.push(group); - // Check if a specific tab has been selected via URL parameter - const tabParam = new URLSearchParams(window.location.search).get( - group - ); - if (tabParam) { - console.log( - "sphinx-design: Selecting tab id for group '" + - group + - "' from URL parameter: " + - tabParam - ); - window.sessionStorage.setItem(storageKeyPrefix + group, tabParam); - } - } - - // Check is a specific tab has been selected previously - let previousId = window.sessionStorage.getItem( - storageKeyPrefix + group - ); - if (previousId === id) { - // console.log( - // "sphinx-design: Selecting tab from session storage: " + id - // ); - // @ts-ignore - label.previousElementSibling.checked = true; - } - } - } - }); -} - -/** - * Activate other tabs with the same sync id. - * - * @this {HTMLElement} - The element that was clicked. - */ -function onSDLabelClick() { - let data = create_key(this); - if (!data) return; - let [group, id, key] = data; - for (const label of sd_id_to_elements[key]) { - if (label === this) continue; - // @ts-ignore - label.previousElementSibling.checked = true; - } - window.sessionStorage.setItem(storageKeyPrefix + group, id); -} - -document.addEventListener("DOMContentLoaded", ready, false); diff --git a/_sphinx_design_static/sphinx-design.4cbf315f70debaebd550c87a6162cf0f.min.css b/_sphinx_design_static/sphinx-design.4cbf315f70debaebd550c87a6162cf0f.min.css deleted file mode 100644 index 860c36da0..000000000 --- a/_sphinx_design_static/sphinx-design.4cbf315f70debaebd550c87a6162cf0f.min.css +++ /dev/null @@ -1 +0,0 @@ -.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) 
!important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text) !important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) 
!important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) !important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 
!important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem !important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column 
!important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none !important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset !important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem 
!important;line-height:unset !important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid !important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem 
.75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) !important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) !important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) 
!important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 
1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 
auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 auto;-ms-flex:0 0 auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 
1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative;font-size:var(--sd-fontsize-dropdown)}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary.sd-summary-title{padding:.5em .6em .5em 1em;font-size:var(--sd-fontsize-dropdown-title);font-weight:var(--sd-fontweight-dropdown-title);user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;list-style:none;display:inline-flex;justify-content:space-between}details.sd-dropdown summary.sd-summary-title::-webkit-details-marker{display:none}details.sd-dropdown summary.sd-summary-title:focus{outline:none}details.sd-dropdown summary.sd-summary-title 
.sd-summary-icon{margin-right:.6em;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary.sd-summary-title .sd-summary-text{flex-grow:1;line-height:1.5;padding-right:.5rem}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker{pointer-events:none;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker svg{opacity:.6}details.sd-dropdown summary.sd-summary-title:hover .sd-summary-state-marker svg{opacity:1;transform:scale(1.1)}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown .sd-summary-chevron-right{transition:.25s}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-right{transform:rotate(90deg)}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-down{transform:rotate(180deg)}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #0071bc;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: 
white;--sd-color-primary-highlight: #0060a0;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-bg: rgba(0, 113, 188, 0.2);--sd-color-secondary-bg: rgba(108, 117, 125, 0.2);--sd-color-success-bg: rgba(40, 167, 69, 0.2);--sd-color-info-bg: rgba(23, 162, 184, 0.2);--sd-color-warning-bg: rgba(240, 179, 126, 0.2);--sd-color-danger-bg: rgba(220, 53, 69, 0.2);--sd-color-light-bg: rgba(248, 249, 250, 0.2);--sd-color-muted-bg: rgba(108, 117, 125, 0.2);--sd-color-dark-bg: rgba(33, 37, 41, 0.2);--sd-color-black-bg: rgba(0, 0, 0, 0.2);--sd-color-white-bg: rgba(255, 255, 255, 0.2);--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem;--sd-fontsize-dropdown: inherit;--sd-fontsize-dropdown-title: 1rem;--sd-fontweight-dropdown-title: 700} diff --git a/_static/code/getting_started/moviepy_10_minutes/trailer.py b/_static/code/getting_started/moviepy_10_minutes/trailer.py deleted file mode 100644 index f530c383a..000000000 --- a/_static/code/getting_started/moviepy_10_minutes/trailer.py +++ /dev/null @@ -1,307 +0,0 @@ -# Let's import moviepy; let's also import numpy, as we will use it at some point -from moviepy import * -import numpy as np - - -################# -# VIDEO LOADING # -################# -# We load our video -video = VideoFileClip("./resources/bbb.mp4") - - -##################### -# SCENES EXTRACTION # -##################### -# We extract the scenes we want to use - -# First the characters -intro_clip = video.with_subclip(1, 11) -bird_clip = video.with_subclip(16, 20) -bunny_clip = video.with_subclip(37, 55) -rodents_clip = video.with_subclip( - "00:03:34.75", "00:03:56" -) # we can also use string notation in the format HH:MM:SS.uS -rambo_clip = video.with_subclip("04:41.5", "04:44.70") - - -##################### -# SCENES PREVIEWING # -##################### -# Now, let's have a first look at our clips -# Warning: you need ffplay installed for preview to work -# We set a low fps so our machine can render in real time without slowing down -intro_clip.preview(fps=20) -bird_clip.preview(fps=20) -bunny_clip.preview(fps=20) -rodents_clip.preview(fps=20) -rambo_clip.preview(fps=20) - - -############################## -# CLIPS MODIFICATION CUTTING # -############################## -# Well, looking at the rodent
scene, it is a bit long, isn't it? -# Let's see how we can modify the clip with one of the many clip manipulation methods starting with with_*, -# in this case by removing the part of the clip between t=4s and t=10s, using with_cutout -rodents_clip = rodents_clip.with_cutout(start_time=4, end_time=10) - -# Note: You may have noticed that we reassigned rodents_clip; this is because all with_* methods return a modified *copy* of the -# original clip instead of modifying it directly. In MoviePy, any method starting with with_* is out-of-place instead of in-place, -# meaning it does not modify the original data, but instead copies it and returns the modified copy - -# Let's check the result -rodents_clip.preview(fps=10) - -############################ -# TEXT/LOGO CLIPS CREATION # -############################ -# Let's create the texts to put between our clips -font = "./resources/font/font.ttf" -intro_text = TextClip( - font=font, - text="The Blender Foundation and\nPeach Project presents", - font_size=50, - color="#fff", - text_align="center", -) -bird_text = TextClip(font=font, text="An unlucky bird", font_size=50, color="#fff") -bunny_text = TextClip( - font=font, text="A (slightly overweight) bunny", font_size=50, color="#fff" -) -rodents_text = TextClip( - font=font, text="And three rodent pests", font_size=50, color="#fff" -) -revenge_text = TextClip( - font=font, text="Revenge is coming...", font_size=50, color="#fff" -) -made_with_text = TextClip(font=font, text="Made with", font_size=50, color="#fff") - -# We will also need the Big Buck Bunny logo, so let's load it and resize it -logo_clip = ImageClip("./resources/logo_bbb.png").resized(width=400) -moviepy_clip = ImageClip("./resources/logo_moviepy.png").resized(width=300) - - -################ -# CLIPS TIMING # -################ -# We have all the clips we need, but if we were to turn all those clips into a single one with composition (we will see that in the next step), -# all our clips would start at the same time and play on top of each other, which is obviously not what we want. -# To fix that, we need to say when a clip should start and stop in the final clip.
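# (Editorial sketch of the timing model, with hypothetical values; some_clip
#  is not part of this script. A clip placed with
#      some_clip = some_clip.with_start(5).with_duration(3)
#  plays from t=5s to t=8s of the final composition;
#  some_clip.with_start(5).with_end(8) would produce the same placement.)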
-# So, let's start by setting when each clip must start and end, using the appropriate with_* methods -intro_text = intro_text.with_duration(6).with_start( - 3 -) # Intro for 6 seconds, start after 3 seconds -logo_clip = logo_clip.with_start(intro_text.start + 2).with_end( - intro_text.end -) # Logo starts 2 seconds after the intro text and stops with it -bird_clip = bird_clip.with_start( - intro_clip.end -) # Make bird clip start after intro, duration already known -bird_text = bird_text.with_start(bird_clip.start).with_end( - bird_clip.end -) # Keep the text in sync with the clip -bunny_clip = bunny_clip.with_start(bird_clip.end) # Make bunny clip follow bird clip -bunny_text = bunny_text.with_start(bunny_clip.start + 2).with_duration(7) -rodents_clip = rodents_clip.with_start(bunny_clip.end) -rodents_text = rodents_text.with_start(rodents_clip.start).with_duration(4) -rambo_clip = rambo_clip.with_start(rodents_clip.end - 1.5) -revenge_text = revenge_text.with_start(rambo_clip.start + 1.5).with_duration(4) -made_with_text = made_with_text.with_start(rambo_clip.end).with_duration(3) -moviepy_clip = moviepy_clip.with_start(made_with_text.start).with_duration(3) - - -######################## -# CLIPS TIMING PREVIEW # -######################## -# Let's make a first composite of those clips into a single clip and do a quick preview to see if everything is in sync - -quick_compo = CompositeVideoClip( - [ - intro_clip, - intro_text, - logo_clip, - bird_clip, - bird_text, - bunny_clip, - bunny_text, - rodents_clip, - rodents_text, - rambo_clip, - revenge_text, - made_with_text, - moviepy_clip, - ] -) -quick_compo.preview(fps=10) - - -##################### -# CLIPS POSITIONING # -##################### -# Now that we have set the timing of our different clips, we need to make sure they are in the right position -# We will keep things simple, and almost always center every text -bird_text = bird_text.with_position(("center", "center")) -bunny_text = bunny_text.with_position(("center", "center")) -rodents_text = rodents_text.with_position(("center", "center")) -revenge_text = revenge_text.with_position(("center", "center")) - -# For the logos and intro/end texts, we will use pixel positions instead of center -top = intro_clip.h // 2 -intro_text = intro_text.with_position(("center", 200)) -logo_clip = logo_clip.with_position(("center", top)) -made_with_text = made_with_text.with_position(("center", 300)) -moviepy_clip = moviepy_clip.with_position(("center", 360)) - -# Let's take another look to check the positions -quick_compo = CompositeVideoClip( - [ - intro_clip, - intro_text, - logo_clip, - bird_clip, - bird_text, - bunny_clip, - bunny_text, - rodents_clip, - rodents_text, - rambo_clip, - revenge_text, - made_with_text, - moviepy_clip, - ] -) -quick_compo.preview(fps=10) - - -################################# -# CLIPS TRANSITIONS AND EFFECTS # -################################# -# Now that our clips are timed and positioned, let's add some transitions to make the result feel more natural -# To do so we use the with_effects method and the video effects in vfx -# We call with_effects on our clip and pass it a list of effect objects to apply -# We'll keep it simple: nothing fancy, just cross-fading -intro_text = intro_text.with_effects([vfx.CrossFadeIn(1), vfx.CrossFadeOut(1)]) -logo_clip = logo_clip.with_effects([vfx.CrossFadeIn(1), vfx.CrossFadeOut(1)]) -bird_text = bird_text.with_effects([vfx.CrossFadeIn(0.5), vfx.CrossFadeOut(0.5)]) -bunny_text = bunny_text.with_effects([vfx.CrossFadeIn(0.5), vfx.CrossFadeOut(0.5)])
-rodents_text = rodents_text.with_effects([vfx.CrossFadeIn(0.5), vfx.CrossFadeOut(0.5)]) - -# Also add cross-fading on the video clips and the video clips' audio -# See how video effects are under vfx and audio ones under afx -intro_clip = intro_clip.with_effects( - [vfx.FadeIn(1), vfx.FadeOut(1), afx.AudioFadeIn(1), afx.AudioFadeOut(1)] -) -bird_clip = bird_clip.with_effects( - [vfx.FadeIn(1), vfx.FadeOut(1), afx.AudioFadeIn(1), afx.AudioFadeOut(1)] -) -bunny_clip = bunny_clip.with_effects( - [vfx.FadeIn(1), vfx.FadeOut(1), afx.AudioFadeIn(1), afx.AudioFadeOut(1)] -) -rodents_clip = rodents_clip.with_effects( - [vfx.FadeIn(1), vfx.CrossFadeOut(1.5), afx.AudioFadeIn(1), afx.AudioFadeOut(1.5)] -) # Just fade in; the rambo clip will do the cross-fade -rambo_clip = rambo_clip.with_effects( - [vfx.CrossFadeIn(1.5), vfx.FadeOut(1), afx.AudioFadeIn(1.5), afx.AudioFadeOut(1)] -) - -# Effects are not only for transitions; they can also change a clip's timing or appearance -# To show that, let's also modify the Rambo-like part of our clip to be in slow motion -# P.S.: We do it with an effect here, but this is one of the few effects that has a direct shortcut, with_multiply_speed; -# the others are with_multiply_volume, resized, cropped and rotated -rambo_clip = rambo_clip.with_effects([vfx.MultiplySpeed(0.5)]) - -# Because we modified the timing of rambo_clip with our MultiplySpeed effect, we must re-assign the timing of the following clips -made_with_text = made_with_text.with_start(rambo_clip.end).with_duration(3) -moviepy_clip = moviepy_clip.with_start(made_with_text.start).with_duration(3) - -# Let's have a last look at the result to make sure everything is working as expected -quick_comp = CompositeVideoClip( - [ - intro_clip, - intro_text, - logo_clip, - bird_clip, - bird_text, - bunny_clip, - bunny_text, - rodents_clip, - rodents_text, - rambo_clip, - revenge_text, - made_with_text, - moviepy_clip, - ] -) -quick_comp.preview(fps=10) - - -############### -# CLIP FILTER # -############### -# Let's finish by modifying our rambo clip to make it sepia - - -# We will start by defining a function that turns a numpy image into sepia -# It takes the image as a numpy array as input and returns the modified image as output -def sepia_filter(frame: np.ndarray): - # Sepia filter transformation matrix - # The sepia transform works by applying the following rules to each pixel of the image: - # res_R = (R * .393) + (G * .769) + (B * .189) - # res_G = (R * .349) + (G * .686) + (B * .168) - # res_B = (R * .272) + (G * .534) + (B * .131) - # - # With numpy we can do that very efficiently by multiplying the image matrix by a transformation matrix - sepia_matrix = np.array( - [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]] - ) - - # Convert the image to float32 format for matrix multiplication - frame = frame.astype(np.float32) - - # Apply the sepia transformation - # .T is needed so that each output channel is the dot product of the matching - # row of sepia_matrix with the pixel's (R, G, B) values - sepia_image = np.dot(frame, sepia_matrix.T) - - # Because the result can exceed 255, we clip it to the range [0, 255] - sepia_image = np.clip(sepia_image, 0, 255) - - # Convert the image back to uint8 format, because we need integers, not floats - sepia_image = sepia_image.astype(np.uint8) - - return sepia_image - - -# Now, we simply apply the filter to our clip by calling
-rambo_clip = rambo_clip.image_transform(sepia_filter)
-
-# Let's see how our filter looks
-rambo_clip.preview(fps=10)
-
-
-##################
-# CLIP RENDERING #
-##################
-# Everything is good and ready, we can finally render our clip into a file
-final_clip = CompositeVideoClip(
-    [
-        intro_clip,
-        intro_text,
-        logo_clip,
-        bird_clip,
-        bird_text,
-        bunny_clip,
-        bunny_text,
-        rodents_clip,
-        rodents_text,
-        rambo_clip,
-        revenge_text,
-        made_with_text,
-        moviepy_clip,
-    ]
-)
-final_clip.write_videofile("./result.mp4")
diff --git a/_static/code/getting_started/quick_presentation/basic_example.py b/_static/code/getting_started/quick_presentation/basic_example.py
deleted file mode 100644
index 2a32d0ec8..000000000
--- a/_static/code/getting_started/quick_presentation/basic_example.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Import everything needed to edit video clips
-from moviepy import *
-
-# Load file example.mp4 and extract only the subclip from 00:00:10 to 00:00:20
-clip = VideoFileClip("long_examples/example2.mp4").with_subclip(10, 20)
-
-# Reduce the audio volume to 80% of its original level
-clip = clip.with_multiply_volume(0.8)
-
-# Generate a text clip. You can customize the font, color, etc.
-txt_clip = TextClip(
-    font="example.ttf", text="Big Buck Bunny", font_size=70, color="white"
-)
-
-# Say that you want it to appear for 10s at the center of the screen
-txt_clip = txt_clip.with_position("center").with_duration(10)
-
-# Overlay the text clip on the first video clip
-video = CompositeVideoClip([clip, txt_clip])
-
-# Write the result to a file (many options available!)
-video.write_videofile("result.mp4")
diff --git a/_static/code/user_guide/compositing/CompositeAudioClip.py b/_static/code/user_guide/compositing/CompositeAudioClip.py
deleted file mode 100644
index a810bfcf1..000000000
--- a/_static/code/user_guide/compositing/CompositeAudioClip.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from moviepy import *
-
-# We load all the clips we want to compose
-aclip1 = AudioFileClip("example.wav")
-aclip2 = AudioFileClip("example2.wav")
-aclip3 = AudioFileClip("example3.wav")
-
-# All clips will play one after the other
-concat = concatenate_audioclips([aclip1, aclip2, aclip3])
-
-# We will play aclip1, then aclip2 on top of it after 5s, then aclip3 on top of both after 9s
-compo = CompositeAudioClip(
-    [
-        aclip1.with_multiply_volume(1.2),
-        aclip2.with_start(5),  # start at t=5s
-        aclip3.with_start(9),
-    ]
-)
diff --git a/_static/code/user_guide/compositing/CompositeVideoClip.py b/_static/code/user_guide/compositing/CompositeVideoClip.py
deleted file mode 100644
index 1c1785506..000000000
--- a/_static/code/user_guide/compositing/CompositeVideoClip.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from moviepy import *
-
-# We load all the clips we want to compose
-clip1 = VideoFileClip("example.mp4")
-clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1)
-clip3 = VideoFileClip("example3.mp4")
-
-# We composite them, stacked on top of each other: clip3 over clip2 over clip1
-final_clip = CompositeVideoClip([clip1, clip2, clip3])
-final_clip.write_videofile("final_clip.mp4")
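-# (Aside, not in the original docs: by default the composite canvas takes the
-# size of the first clip in the list; assuming the optional size argument, an
-# explicit canvas can be requested instead.)
-final_clip_hd = CompositeVideoClip([clip1, clip2, clip3], size=(1280, 720))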
diff --git a/_static/code/user_guide/compositing/concatenate.py b/_static/code/user_guide/compositing/concatenate.py
deleted file mode 100644
index edc99137c..000000000
--- a/_static/code/user_guide/compositing/concatenate.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from moviepy import VideoFileClip, concatenate_videoclips
-
-# We load all the clips we want to concatenate
-clip1 = VideoFileClip("example.mp4")
-clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1)
-clip3 = VideoFileClip("example3.mp4")
-
-# We concatenate them and write the result
-final_clip = concatenate_videoclips([clip1, clip2, clip3])
-final_clip.write_videofile("final_clip.mp4")
diff --git a/_static/code/user_guide/compositing/crossfadein.py b/_static/code/user_guide/compositing/crossfadein.py
deleted file mode 100644
index ffdae5d2e..000000000
--- a/_static/code/user_guide/compositing/crossfadein.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from moviepy import *
-
-# We load all the clips we want to compose
-clip1 = VideoFileClip("example.mp4")
-clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1)
-
-# clip2 will be on top of clip1 for 1s
-clip1 = clip1.with_end(2)
-clip2 = clip2.with_start(1)
-
-# We add a 1s cross-fade-in on clip2
-# Like the other effects, transitions are applied with with_effects
-clip2 = clip2.with_effects([vfx.CrossFadeIn(1)])
-
-
-# We write the result
-final_clip = CompositeVideoClip([clip1, clip2])
-final_clip.write_videofile("final_clip.mp4")
diff --git a/_static/code/user_guide/compositing/juxtaposing.py b/_static/code/user_guide/compositing/juxtaposing.py
deleted file mode 100644
index 3d9b6d9d9..000000000
--- a/_static/code/user_guide/compositing/juxtaposing.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from moviepy import VideoFileClip, clips_array, vfx
-
-# We will use the same clip and transform it in 3 ways
-clip1 = VideoFileClip("example.mp4").with_effects([vfx.Margin(10)])  # add a 10px contour
-clip2 = clip1.with_effects([vfx.MirrorX()])  # flip horizontally
-clip3 = clip1.with_effects([vfx.MirrorY()])  # flip vertically
-clip4 = clip1.resized(0.6)  # downsize to 60% of the original
-
-# The shape of the final clip depends on the shape of the array
-# We want our 4 videos in a 2x2 grid, so we make a 2x2 array
-final_clip = clips_array([[clip1, clip2], [clip3, clip4]])
-final_clip = final_clip.resized(
-    width=480
-)  # We resize the resulting clip to the dimensions we want
-
-final_clip.write_videofile("final_clip.mp4")
diff --git a/_static/code/user_guide/compositing/with_position.py b/_static/code/user_guide/compositing/with_position.py
deleted file mode 100644
index bd801d3f5..000000000
--- a/_static/code/user_guide/compositing/with_position.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from moviepy import *
-
-# We load all the clips we want to compose
-background = VideoFileClip("example2.mp4").with_subclip(0, 2)
-title = TextClip(
-    "./example.ttf",
-    text="Big Buck Bunny",
-    font_size=80,
-    color="#fff",
-    text_align="center",
-    duration=1,
-)
-author = TextClip(
-    "./example.ttf",
-    text="Blender Foundation",
-    font_size=40,
-    color="#fff",
-    text_align="center",
-    duration=1,
-)
-copyright = TextClip(
-    "./example.ttf",
-    text="© CC BY 3.0",
-    font_size=20,
-    color="#fff",
-    text_align="center",
-    duration=1,
-)
-logo = ImageClip("./example2.png", duration=1).resized(height=50)
-
-# We want our title horizontally centered, starting at 25% of the video height
-# Positions can be given as "center", "left", "right", "top" and "bottom", or as a fraction of the clip size with relative=True
-title = title.with_position(("center", 0.25), relative=True)
-
-# We want the author centered, 30px under the title
-# Positions can also be given in pixels
-top = background.h * 0.25 + title.h + 30
-left = (background.w - author.w) / 2
-author = author.with_position((left, top))
-
-# We want the copyright 30px above the bottom
-copyright = copyright.with_position(("center", background.h - copyright.h - 30))
-# Finally, we want the logo centered, but dropping down as time passes
-# We can do so by giving the position as a function of time, much like make_frame
-top = (background.h - logo.h) / 2
-logo = logo.with_position(lambda t: ("center", top + t * 30))
-
-# We write the result
-final_clip = CompositeVideoClip([background, title, author, copyright, logo])
-final_clip.write_videofile("final_clip.mp4")
diff --git a/_static/code/user_guide/compositing/with_start.py b/_static/code/user_guide/compositing/with_start.py
deleted file mode 100644
index e59848da4..000000000
--- a/_static/code/user_guide/compositing/with_start.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from moviepy import *
-
-# We load all the clips we want to compose
-clip1 = VideoFileClip("example.mp4")
-clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1)
-clip3 = VideoFileClip("example3.mp4")
-
-# We want to stop clip1 after 1s
-clip1 = clip1.with_end(1)
-
-# We want to play clip2 after 1.5s
-clip2 = clip2.with_start(1.5)
-
-# We want clip3 to play at the end of clip2, and for 1 second only
-clip3 = clip3.with_start(clip2.end).with_duration(
-    1
-)  # Sometimes it is more practical to set the duration of a clip rather than its end
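-# (Aside, not in the original docs: with_duration and with_end are two ways to
-# express the same bound; for clip3 starting at clip2.end, these are equivalent.)
-clip3_alt = clip3.with_start(clip2.end).with_end(clip2.end + 1)  # same result as with_duration(1)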
diff --git a/_static/code/user_guide/effects/custom_effect.py b/_static/code/user_guide/effects/custom_effect.py
deleted file mode 100644
index 51bf04b95..000000000
--- a/_static/code/user_guide/effects/custom_effect.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from moviepy import VideoClip
-from moviepy.decorators import requires_duration
-
-
-# Here you see a decorator that verifies that our clip has a duration
-# MoviePy offers a few of those that come in handy when writing your own effects
-@requires_duration
-def progress_bar(clip: VideoClip, color: tuple, height: int = 10):
-    """
-    Add a progress bar at the bottom of our clip
-
-    Parameters
-    ----------
-
-    color: Color of the bar as an RGB tuple
-    height: The height of the bar in pixels. Default = 10
-    """
-
-    # Because we define the filter function inside the effect itself,
-    # it has access to the effect's scope and can use clip from inside filter
-    def filter(get_frame, t):
-        progression = t / clip.duration
-        bar_width = int(progression * clip.w)
-
-        # Showing a progress bar is just replacing the bottom pixels on part of our frame
-        frame = get_frame(t)
-        frame[-height:, 0:bar_width] = color
-
-        return frame
-
-    return clip.transform(filter, apply_to="mask")
diff --git a/_static/code/user_guide/effects/image_transform.py b/_static/code/user_guide/effects/image_transform.py
deleted file mode 100644
index 2983b8125..000000000
--- a/_static/code/user_guide/effects/image_transform.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from moviepy import VideoFileClip
-import numpy
-
-my_clip = VideoFileClip("example.mp4")
-
-
-def invert_green_blue(image: numpy.ndarray) -> numpy.ndarray:
-    return image[:, :, [0, 2, 1]]
-
-
-modified_clip1 = my_clip.image_transform(invert_green_blue)
diff --git a/_static/code/user_guide/effects/modify_copy_example.py b/_static/code/user_guide/effects/modify_copy_example.py
deleted file mode 100644
index 49595e949..000000000
--- a/_static/code/user_guide/effects/modify_copy_example.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Import everything needed to edit video clips
-from moviepy import *
-
-# Load example.mp4
-clip = VideoFileClip("example.mp4")
-
-# This does nothing: with_multiply_volume returns a copy of clip, which is lost immediately because we don't store it
-# If you were to render clip now, the audio would still be at full volume
-clip.with_multiply_volume(0.1)
-
-# This creates a copy of clip in clip_whisper with only 10% of the original volume, without modifying the original clip
-# If you were to render clip right now, the audio would still be at full volume
-# If you were to render clip_whisper, the audio would be at 10% of the original volume
-clip_whisper = clip.with_multiply_volume(0.1)
-
-# This replaces the original clip with a copy of it where the volume is only 10% of the original
-# If you were to render clip now, the audio would be at 10%
-# The original clip is now lost
-clip = clip.with_multiply_volume(0.1)
diff --git a/_static/code/user_guide/effects/time_transform.py b/_static/code/user_guide/effects/time_transform.py
deleted file mode 100644
index c5ccad256..000000000
--- a/_static/code/user_guide/effects/time_transform.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from moviepy import VideoFileClip
-import math
-
-my_clip = VideoFileClip("example.mp4")
-
-
-# You can define a function the classical way
-def accel_x3(time: float) -> float:
-    return time * 3
-
-
-modified_clip1 = my_clip.time_transform(accel_x3)
-
-# Or you can use a lambda function
-modified_clip2 = my_clip.time_transform(lambda t: 1 + math.sin(t))
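-# (Aside, not in the original docs: time_transform only remaps which source time
-# each output frame reads from; a classic use is playing a clip backwards. The
-# small offset below is a precaution to keep t inside the clip's range.)
-modified_clip3 = my_clip.time_transform(lambda t: max(0, my_clip.duration - t - 0.01))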
- """ - frame = get_frame(t) - frame_region = frame[int(t) : int(t) + 360, :] - return frame_region - - -modified_clip1 = my_clip.transform(scroll) diff --git a/_static/code/user_guide/effects/using_effects.py b/_static/code/user_guide/effects/using_effects.py deleted file mode 100644 index 8cf6cc933..000000000 --- a/_static/code/user_guide/effects/using_effects.py +++ /dev/null @@ -1,15 +0,0 @@ -from moviepy import VideoFileClip -from moviepy import vfx, afx - -myclip = VideoFileClip("example.mp4") -myclip = myclip.with_effects( - [vfx.Resize(width=460)] -) # resize clip to be 460px in width, keeping aspect ratio - -# fx method return a copy of the clip, so we can easily chain them -myclip = myclip.with_effects( - [vfx.MultiplySpeed(2), afx.MultiplyVolume(0.5)] -) # double the speed and half the audio volume - -# because effects are added to Clip at runtime, you can also call them directly from your clip as methods -myclip = myclip.with_effects([vfx.MultiplyColor(0.5)]) # darken the clip diff --git a/_static/code/user_guide/effects/using_with_methods.py b/_static/code/user_guide/effects/using_with_methods.py deleted file mode 100644 index 0267dc189..000000000 --- a/_static/code/user_guide/effects/using_with_methods.py +++ /dev/null @@ -1,6 +0,0 @@ -from moviepy import VideoFileClip -from moviepy import vfx, afx - -myclip = VideoFileClip("example.mp4") -myclip = myclip.with_end(5) # stop the clip after 5 sec -myclip = myclip.without_audio() # remove the audio of the clip diff --git a/_static/code/user_guide/loading/AudioArrayClip.py b/_static/code/user_guide/loading/AudioArrayClip.py deleted file mode 100644 index 1ed74ade4..000000000 --- a/_static/code/user_guide/loading/AudioArrayClip.py +++ /dev/null @@ -1,34 +0,0 @@ -import numpy as np -from moviepy import * - -# We want to play those notes -notes = {"A": 440, "B": 494, "C": 523, "D": 587, "E": 659, "F": 698} - -note_duration = 0.5 -total_duration = len(notes) * note_duration -sample_rate = 44100 # Number of samples per second - -note_size = int(note_duration * sample_rate) -total_size = note_size * len(notes) - - -def make_frame(t, note_frequency): - return np.sin(note_frequency * 2 * np.pi * t) - - -# We generate all frames timepoints -times = np.linspace(0, total_duration, total_size) - -# We make an array of size N*1, where N is the number of frames * total duration -audio_array = np.zeros((total_size, 2)) -i = 0 -for note, frequency in notes.items(): - for _ in range(note_size): - audio_array[i][0] = make_frame(times[i], frequency) - i += 1 - -# Create an AudioArrayClip from the audio samples -audio_clip = AudioArrayClip(audio_array, fps=sample_rate) - -# Write the audio clip to a WAV file -audio_clip.write_audiofile("result.wav", fps=44100) diff --git a/_static/code/user_guide/loading/AudioClip.py b/_static/code/user_guide/loading/AudioClip.py deleted file mode 100644 index 37f51b4c7..000000000 --- a/_static/code/user_guide/loading/AudioClip.py +++ /dev/null @@ -1,8 +0,0 @@ -from moviepy import * -import numpy as np - -# Producing a sinewave of 440 Hz -> note A -make_frame_audio = lambda t: np.sin(440 * 2 * np.pi * t) - -# AUDIO CLIPS -clip = AudioClip(make_frame_audio, duration=3) diff --git a/_static/code/user_guide/loading/AudioFileClip.py b/_static/code/user_guide/loading/AudioFileClip.py deleted file mode 100644 index e9aecd89f..000000000 --- a/_static/code/user_guide/loading/AudioFileClip.py +++ /dev/null @@ -1,6 +0,0 @@ -from moviepy import * -import numpy as np - -# Works for audio files, but also videos file where you 
diff --git a/_static/code/user_guide/loading/AudioFileClip.py b/_static/code/user_guide/loading/AudioFileClip.py
deleted file mode 100644
index e9aecd89f..000000000
--- a/_static/code/user_guide/loading/AudioFileClip.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from moviepy import *
-import numpy as np
-
-# Works for audio files, but also for video files where you only want to keep the audio track
-clip = AudioFileClip("example.wav")
-clip.write_audiofile("./result.wav")
diff --git a/_static/code/user_guide/loading/ColorClip.py b/_static/code/user_guide/loading/ColorClip.py
deleted file mode 100644
index 9bf5d9677..000000000
--- a/_static/code/user_guide/loading/ColorClip.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from moviepy import *
-
-myclip = ColorClip(
-    size=(200, 100), color=(255, 0, 0), duration=1
-)  # Color is passed as an RGB tuple
-myclip.write_videofile(
-    "result.mp4", fps=1
-)  # We really don't need more than 1 fps, do we?
diff --git a/_static/code/user_guide/loading/DataVideoClip.py b/_static/code/user_guide/loading/DataVideoClip.py
deleted file mode 100644
index d44e04e49..000000000
--- a/_static/code/user_guide/loading/DataVideoClip.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from moviepy import *
-import numpy as np
-
-# The dataset is just a list of colors as RGB
-dataset = [
-    (255, 0, 0),
-    (0, 255, 0),
-    (0, 0, 255),
-    (0, 255, 255),
-    (255, 0, 255),
-    (255, 255, 0),
-]
-
-
-# The make_frame function takes a data entry and creates a 200x100 px image filled with that color
-def make_frame(data):
-    frame = np.full((100, 200, 3), data, dtype=np.uint8)
-    return frame
-
-
-# We create the DataVideoClip with FPS set to 2, making a 3s clip (len(dataset) = 6, and 6/2 = 3)
-myclip = DataVideoClip(data=dataset, data_to_frame=make_frame, fps=2)
-
-# Modifying fps here changes the output video FPS, not the clip FPS
-myclip.write_videofile("result.mp4", fps=30)
diff --git a/_static/code/user_guide/loading/ImageClip.py b/_static/code/user_guide/loading/ImageClip.py
deleted file mode 100644
index f704a5500..000000000
--- a/_static/code/user_guide/loading/ImageClip.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from moviepy import *
-import numpy as np
-
-# Random RGB noise image of 200x100
-noise_image = np.random.randint(low=0, high=255, size=(100, 200, 3))
-
-myclip1 = ImageClip("example.png")  # You can create it from a path
-myclip2 = ImageClip(noise_image)  # from a (height x width x 3) RGB numpy array
-myclip3 = VideoFileClip("./example.mp4").to_ImageClip(
-    t="00:00:01"
-)  # Or load a video clip and extract the frame at a given time
diff --git a/_static/code/user_guide/loading/ImageSequenceClip.py b/_static/code/user_guide/loading/ImageSequenceClip.py
deleted file mode 100644
index a19432987..000000000
--- a/_static/code/user_guide/loading/ImageSequenceClip.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from moviepy import *
-
-# A clip with a list of images shown for 1 second each
-myclip = ImageSequenceClip(
-    [
-        "example_img_dir/image_0001.jpg",
-        "example_img_dir/image_0002.jpg",
-        "example_img_dir/image_0003.jpg",
-    ],
-    durations=[1, 1, 1],
-)
-print(
-    "Clip duration: {}".format(myclip.duration)
-)  # 3 images, 1 second each, so duration = 3
-print("Clip fps: {}".format(myclip.fps))  # 3 seconds, 3 images, so fps = 3/3 = 1
-
-# This time we load all the images in the directory and, instead of showing them for X seconds each, we set an FPS
-myclip2 = ImageSequenceClip("./example_img_dir", fps=30)
-print(
-    "Clip duration: {}".format(myclip2.duration)
-)  # fps = 30, so duration = number of images in dir / 30
-print("Clip fps: {}".format(myclip2.fps))  # fps = 30
-
-myclip.write_gif("result.gif")  # the gif will be 3 sec long at 1 fps
-myclip2.write_gif(
-    "result2.gif"
-)  # the gif will be 30 fps; its duration varies with the number of images in the dir
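-# (Aside, not in the original docs: the durations list doesn't have to be
-# uniform; each image can get its own display time.)
-myclip3 = ImageSequenceClip(
-    ["example_img_dir/image_0001.jpg", "example_img_dir/image_0002.jpg"],
-    durations=[0.5, 2],
-)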
a/_static/code/user_guide/loading/TextClip.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from moviepy import *
-
-font = "./example.ttf"
-
-# First we pass the text as a string and let the system compute clip dimensions that fit it
-# We set the clip duration to 2 secs; if we don't, it gets an infinite duration
-txt_clip1 = TextClip(
-    font=font,
-    text="Hello World !",
-    font_size=30,
-    color="#FF0000",
-    bg_color="#FFFFFF",
-    duration=2,
-)  # Red
-
-# This time we load the text from a file, set a fixed clip size and let the system find the best font size,
-# allowing line breaks
-txt_clip2 = TextClip(
-    font=font,
-    filename="./example.txt",
-    size=(500, 200),
-    bg_color="#FFFFFF",
-    method="caption",
-    color=(0, 0, 255, 127),
-)  # Blue with 50% transparency
-
-# We set a duration, because image clips are infinite by default, and we cannot render something infinite
-txt_clip2 = txt_clip2.with_duration(2)
-
-txt_clip1.write_videofile(
-    "result1.mp4", fps=24
-)  # Text clips have no FPS either, so we must define one
-txt_clip2.write_videofile("result2.mp4", fps=24)
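-# (Aside, not in the original docs: assuming the stroke options of the v2
-# TextClip, an outline can keep text readable on busy backgrounds.)
-txt_clip3 = TextClip(
-    font=font,
-    text="Outlined text",
-    font_size=30,
-    color="#FFFFFF",
-    stroke_color="#000000",
-    stroke_width=2,
-    duration=2,
-)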
diff --git a/_static/code/user_guide/loading/UpdatedVideoClip.py b/_static/code/user_guide/loading/UpdatedVideoClip.py
deleted file mode 100644
index c45f7192a..000000000
--- a/_static/code/user_guide/loading/UpdatedVideoClip.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from moviepy import *
-import numpy as np
-import random
-
-
-# Imagine we want a video that becomes more and more red as the same coin-flip face repeats in a row.
-# Because the coin flips happen in real time, we must wait until a streak ends before we can
-# make the next frame.
-# This is a world simulating that. Sorry, it's hard to come up with examples...
-class CoinFlipWorld:
-    def __init__(self, fps):
-        """
-        FPS is useful because we must increment clip_t by 1/FPS for UpdatedVideoClip to run at that FPS
-        """
-        self.clip_t = 0
-        self.win_strike = 0
-        self.reset = False
-        self.fps = fps
-
-    def update(self):
-        if self.reset:
-            self.win_strike = 0
-            self.reset = False
-
-        print("strike : {}, clip_t : {}".format(self.win_strike, self.clip_t))
-        print(self.win_strike)
-
-        # 0 is tails, 1 is heads; this is our coin-flip simulation
-        choice = random.randint(0, 1)
-        face = random.randint(0, 1)
-
-        # We win: increment our streak and retry
-        if choice == face:
-            self.win_strike += 1
-            return
-
-        # Different face: we increment clip_t and flag a reset for the next update.
-        # We don't reset immediately because we still need the current state to make the frame
-        self.reset = True
-        self.clip_t += 1 / self.fps
-
-    def to_frame(self):
-        red_intensity = 255 * (
-            self.win_strike / 10
-        )  # fully red at 10 wins in a row or more
-        red_intensity = min(red_intensity, 255)
-
-        # A 200x100 image, more or less red depending on the number of wins in a row
-        return np.full((100, 200, 3), (red_intensity, 0, 0), dtype=np.uint8)
-
-
-world = CoinFlipWorld(fps=5)
-
-myclip = UpdatedVideoClip(world=world, duration=10)
-# We set the FPS to the world's FPS; if we used a different one, the lower of world.fps
-# and the write_videofile fps parameter would be the effective visible FPS
-myclip.write_videofile("result.mp4", fps=5)
diff --git a/_static/code/user_guide/loading/VideoClip.py b/_static/code/user_guide/loading/VideoClip.py
deleted file mode 100644
index 6d5485306..000000000
--- a/_static/code/user_guide/loading/VideoClip.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from PIL import Image, ImageDraw
-import numpy as np
-from moviepy import *
-import math
-
-WIDTH, HEIGHT = (128, 128)
-RED = (255, 0, 0)
-
-
-def make_frame(t):
-    frequency = 1  # One pulse per second
-    coef = 0.5 * (1 + math.sin(2 * math.pi * frequency * t))  # the radius varies over time
-    radius = WIDTH * coef
-
-    x1 = WIDTH / 2 - radius / 2
-    y1 = HEIGHT / 2 - radius / 2
-    x2 = WIDTH / 2 + radius / 2
-    y2 = HEIGHT / 2 + radius / 2
-
-    img = Image.new("RGB", (WIDTH, HEIGHT))
-    draw = ImageDraw.Draw(img)
-    draw.ellipse((x1, y1, x2, y2), fill=RED)
-
-    return np.array(img)  # returns an 8-bit RGB array
-
-
-clip = VideoClip(
-    make_frame, duration=2
-)  # we give the clip a 2s duration so we can render it later
-clip.write_gif(
-    "circle.gif", fps=15
-)  # we must set a framerate because VideoClip has none by default
diff --git a/_static/code/user_guide/loading/VideoFileClip.py b/_static/code/user_guide/loading/VideoFileClip.py
deleted file mode 100644
index f798b6872..000000000
--- a/_static/code/user_guide/loading/VideoFileClip.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from moviepy import *
-
-myclip = VideoFileClip("example.mp4")
-
-# video file clips already have an fps and a duration
-print("Clip duration: {}".format(myclip.duration))
-print("Clip fps: {}".format(myclip.fps))
-
-myclip = myclip.with_subclip(0.5, 2)  # Cut the clip between 0.5 and 2 secs
-print("Clip duration: {}".format(myclip.duration))  # Cutting updates the duration
-print("Clip fps: {}".format(myclip.fps))  # and keeps the fps
-
-myclip.write_videofile(
-    "result.mp4"
-)  # the output video will be 1.5 sec long and use the original fps
diff --git a/_static/code/user_guide/loading/closing.py b/_static/code/user_guide/loading/closing.py
deleted file mode 100644
index c8d818ff1..000000000
--- a/_static/code/user_guide/loading/closing.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from moviepy import *
-
-try:
-    with AudioFileClip("example.wav") as clip:
-        raise Exception("Let's simulate an exception")
-except Exception as e:
-    print("{}".format(e))
-# clip.close() is implicitly called, so the lock on the example.wav file is immediately released.
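-# (Aside, not in the original docs: the same context-manager pattern works for
-# video files too; the reader is released as soon as the block exits.)
-with VideoFileClip("example.mp4") as clip:
-    clip.save_frame("frame.png", t=0.5)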
diff --git a/_static/code/user_guide/loading/loading.py b/_static/code/user_guide/loading/loading.py
deleted file mode 100644
index 4eb9b7800..000000000
--- a/_static/code/user_guide/loading/loading.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from moviepy import *
-import numpy as np
-
-# Define some constants for later use
-black = (0, 0, 0)  # RGB for black
-# Random noise image of 200x100
-make_frame = lambda t: np.random.randint(low=0, high=255, size=(100, 200, 3))
-# A 440 Hz sine wave, i.e. the note A
-make_frame_audio = lambda t: np.sin(440 * 2 * np.pi * t)
-
-# Now let's see how to load different types of resources!
-
-# VIDEO CLIPS
-clip = VideoClip(
-    make_frame, duration=5
-)  # for custom animations, where make_frame is a function returning an image as a numpy array for a given time
-clip = VideoFileClip("example.mp4")  # for videos
-clip = ImageSequenceClip(
-    "example_img_dir", fps=24
-)  # for a list or directory of images to be used as a video sequence
-clip = ImageClip("example.png")  # for a picture
-clip = TextClip(
-    font="./example.ttf", text="Hello!", font_size=70, color="black"
-)  # to render a text as an image
-clip = ColorClip(
-    size=(460, 380), color=black
-)  # a clip of a single uniform color, where color is an RGB tuple/array/list
-
-# AUDIO CLIPS
-clip = AudioFileClip(
-    "example.wav"
-)  # for audio files, but also videos where you only want to keep the audio track
-clip = AudioClip(
-    make_frame_audio, duration=3
-)  # for custom audio, where make_frame is a function returning a float (or a tuple for stereo) for a given time
diff --git a/_static/code/user_guide/loading/masks.py b/_static/code/user_guide/loading/masks.py
deleted file mode 100644
index 1ebc002eb..000000000
--- a/_static/code/user_guide/loading/masks.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from moviepy import *
-import numpy as np
-
-# Random greyscale noise of 200x100 (mask values must be in [0, 1])
-makeframe = lambda t: np.random.rand(100, 200)
-
-# To define a VideoClip as a mask, just pass the parameter is_mask as True
-maskclip1 = VideoClip(makeframe, duration=4, is_mask=True)  # A random noise mask
-maskclip2 = ImageClip("example_mask.jpg", is_mask=True)  # A fixed mask from a jpeg
-maskclip3 = VideoFileClip("example_mask.mp4", is_mask=True)  # A video as a mask
-
-# Load our base clip and apply each mask
-clip = VideoFileClip("example.mp4")
-clip_masked1 = clip.with_mask(maskclip1)
-clip_masked2 = clip.with_mask(maskclip2)
-clip_masked3 = clip.with_mask(maskclip3)
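-# (Aside, not in the original docs: an ordinary clip can also be converted into
-# a mask with to_mask(), which maps its frames to single-channel values in [0, 1].)
-maskclip4 = VideoFileClip("example.mp4").to_mask()
-clip_masked4 = clip.with_mask(maskclip4)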
diff --git a/_static/code/user_guide/rendering/display_in_notebook.py b/_static/code/user_guide/rendering/display_in_notebook.py
deleted file mode 100644
index 5ed5829e9..000000000
--- a/_static/code/user_guide/rendering/display_in_notebook.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from moviepy import *
-
-# ...
-# ... some Jupyter-specific stuff
-# ...
-
-my_video_clip = VideoFileClip("./example.mp4")
-my_image_clip = ImageClip("./example.png")
-my_audio_clip = AudioFileClip("./example.wav")
-
-# We can show any type of clip
-my_video_clip.display_in_notebook()  # embeds a video
-my_image_clip.display_in_notebook()  # embeds an image
-my_audio_clip.display_in_notebook()  # embeds a sound
-
-# We can display only a snapshot of a video
-my_video_clip.display_in_notebook(t=1)
-
-# We can provide any valid HTML5 option as a keyword argument
-# For instance, if the clip is too big, we can set its width
-my_video_clip.display_in_notebook(width=400)
-
-# We can also make it loop, for example to check that a GIF
-# loops as expected
-my_video_clip.display_in_notebook(autoplay=1, loop=1)
diff --git a/_static/code/user_guide/rendering/preview.py b/_static/code/user_guide/rendering/preview.py
deleted file mode 100644
index be7ff92ff..000000000
--- a/_static/code/user_guide/rendering/preview.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from moviepy import *
-
-myclip = VideoFileClip("./example.mp4").with_subclip(0, 1)  # Keep only 0 to 1 sec
-
-# We preview our clip as a video, inheriting the FPS and audio of the original clip
-myclip.preview()
-
-# We preview our clip as a video, but with custom FPS for video and audio,
-# making it lighter on the computer
-myclip.preview(fps=5, audio_fps=11000)
-
-# Now we preview without audio
-myclip.preview(audio=False)
diff --git a/_static/code/user_guide/rendering/save_frame.py b/_static/code/user_guide/rendering/save_frame.py
deleted file mode 100644
index 89a265121..000000000
--- a/_static/code/user_guide/rendering/save_frame.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from moviepy import *
-
-# We load the clip whose frame we want to save
-myclip = VideoFileClip("example.mp4")
-myclip.save_frame("result.png", t=1)  # Save the frame at t=1 sec
diff --git a/_static/code/user_guide/rendering/show.py b/_static/code/user_guide/rendering/show.py
deleted file mode 100644
index f4959b052..000000000
--- a/_static/code/user_guide/rendering/show.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from moviepy import *
-
-myclip = VideoFileClip("./example.mp4")
-
-# We show the first frame of our clip
-myclip.show()
-
-# We show the frame at 00:00:01.5 of our clip
-myclip.show(1.5)
-
-# We want to see our clip without its mask applied
-myclip.show(1.5, with_mask=False)
diff --git a/_static/code/user_guide/rendering/write_gif.py b/_static/code/user_guide/rendering/write_gif.py
deleted file mode 100644
index 8f28aa4be..000000000
--- a/_static/code/user_guide/rendering/write_gif.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from moviepy import *
-
-myclip = VideoFileClip("example.mp4").with_subclip(0, 2)
-
-# Here we just save as a GIF
-myclip.write_gif("result.gif")
-
-# Here we save as a GIF, but set its FPS to 10
-myclip.write_gif("result.gif", fps=10)
diff --git a/_static/code/user_guide/rendering/write_images_sequence.py b/_static/code/user_guide/rendering/write_images_sequence.py
deleted file mode 100644
index 55900c452..000000000
--- a/_static/code/user_guide/rendering/write_images_sequence.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from moviepy import *
-import os
-
-myclip = VideoFileClip("example.mp4")
-
-# Here we save into the output dir, the filename being the frame index (starting at 0, +1 per frame)
-os.mkdir("./output")
-myclip.write_images_sequence("./output/%d.jpg")
-
-# Same, but we left-pad the index with zeros up to 4 digits
-myclip.write_images_sequence("./output/%04d.jpg")
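-# (Aside, not in the original docs: the frames written above can be reassembled
-# into a clip with ImageSequenceClip, a handy frame-by-frame editing round trip.)
-rebuilt = ImageSequenceClip("./output", fps=myclip.fps)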
a/_static/code/user_guide/rendering/write_videofile.py b/_static/code/user_guide/rendering/write_videofile.py
deleted file mode 100644
index e42538a6b..000000000
--- a/_static/code/user_guide/rendering/write_videofile.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from moviepy import *
-
-# We load all the clips we want to compose
-background = VideoFileClip("long_examples/example2.mp4").with_subclip(0, 10)
-title = TextClip(
-    "./example.ttf",
-    text="Big Buck Bunny",
-    font_size=80,
-    color="#fff",
-    text_align="center",
-    duration=3,
-).with_position(("center", "center"))
-
-# We make our final clip through composition
-final_clip = CompositeVideoClip([background, title])
-
-# And finally we can write the result into a file
-
-# Here we just save as MP4, inheriting the FPS, etc. from final_clip
-final_clip.write_videofile("result.mp4")
-
-# Here we save as MP4 but set the FPS ourselves, here 24 fps, like cinema
-final_clip.write_videofile("result24fps.mp4", fps=24)
-
-# Now we save as WEBM instead and use the libvpx-vp9 codec (useful when you need transparency, which MP4 lacks).
-# We also ask for the lightest possible ffmpeg compression optimisation. This does not change
-# the video quality and it decreases encoding time, but it increases the final file size a lot.
-# Finally, we ask ffmpeg to use 4 threads for encoding. You should probably leave this
-# at its default, as ffmpeg is already quite good at picking the best setting on its own.
-final_clip.write_videofile(
-    "result.webm", codec="libvpx-vp9", fps=24, preset="ultrafast", threads=4
-)
diff --git a/_static/code/user_guide/rendering/write_videofile_duration.py b/_static/code/user_guide/rendering/write_videofile_duration.py
deleted file mode 100644
index 99086016f..000000000
--- a/_static/code/user_guide/rendering/write_videofile_duration.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from moviepy import *
-
-# By default an ImageClip has no duration
-my_clip = ImageClip("example.png")
-
-try:
-    # This will fail! We cannot write a clip that has no duration!
-    my_clip.write_videofile("result.mp4")
-except Exception:
-    print("Cannot write a video without a duration")
-
-# Calling with_duration on our clip fixes the problem! We also need to set an fps
-my_clip.with_duration(2).write_videofile("result.mp4", fps=1)
diff --git a/_static/design-tabs.js b/_static/design-tabs.js
deleted file mode 100644
index b25bd6a4f..000000000
--- a/_static/design-tabs.js
+++ /dev/null
@@ -1,101 +0,0 @@
-// @ts-check
-
-// Extra JS capability for selected tabs to be synced
-// The selection is stored in local storage so that it persists across page loads.
-
-/**
- * @type {Record<string, HTMLElement[]>}
- */
-let sd_id_to_elements = {};
-const storageKeyPrefix = "sphinx-design-tab-id-";
-
-/**
- * Create a key for a tab element.
- * @param {HTMLElement} el - The tab element.
- * @returns {[string, string, string] | null} - The key.
- *
- */
-function create_key(el) {
-  let syncId = el.getAttribute("data-sync-id");
-  let syncGroup = el.getAttribute("data-sync-group");
-  if (!syncId || !syncGroup) return null;
-  return [syncGroup, syncId, syncGroup + "--" + syncId];
-}
-
-/**
- * Initialize the tab selection.
- * - */ -function ready() { - // Find all tabs with sync data - - /** @type {string[]} */ - let groups = []; - - document.querySelectorAll(".sd-tab-label").forEach((label) => { - if (label instanceof HTMLElement) { - let data = create_key(label); - if (data) { - let [group, id, key] = data; - - // add click event listener - // @ts-ignore - label.onclick = onSDLabelClick; - - // store map of key to elements - if (!sd_id_to_elements[key]) { - sd_id_to_elements[key] = []; - } - sd_id_to_elements[key].push(label); - - if (groups.indexOf(group) === -1) { - groups.push(group); - // Check if a specific tab has been selected via URL parameter - const tabParam = new URLSearchParams(window.location.search).get( - group - ); - if (tabParam) { - console.log( - "sphinx-design: Selecting tab id for group '" + - group + - "' from URL parameter: " + - tabParam - ); - window.sessionStorage.setItem(storageKeyPrefix + group, tabParam); - } - } - - // Check is a specific tab has been selected previously - let previousId = window.sessionStorage.getItem( - storageKeyPrefix + group - ); - if (previousId === id) { - // console.log( - // "sphinx-design: Selecting tab from session storage: " + id - // ); - // @ts-ignore - label.previousElementSibling.checked = true; - } - } - } - }); -} - -/** - * Activate other tabs with the same sync id. - * - * @this {HTMLElement} - The element that was clicked. - */ -function onSDLabelClick() { - let data = create_key(this); - if (!data) return; - let [group, id, key] = data; - for (const label of sd_id_to_elements[key]) { - if (label === this) continue; - // @ts-ignore - label.previousElementSibling.checked = true; - } - window.sessionStorage.setItem(storageKeyPrefix + group, id); -} - -document.addEventListener("DOMContentLoaded", ready, false); diff --git a/_static/doctools.js b/_static/doctools.js deleted file mode 100644 index d06a71d75..000000000 --- a/_static/doctools.js +++ /dev/null @@ -1,156 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Base JavaScript utilities for all Sphinx HTML documentation. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ -"use strict"; - -const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ - "TEXTAREA", - "INPUT", - "SELECT", - "BUTTON", -]); - -const _ready = (callback) => { - if (document.readyState !== "loading") { - callback(); - } else { - document.addEventListener("DOMContentLoaded", callback); - } -}; - -/** - * Small JavaScript module for the documentation. - */ -const Documentation = { - init: () => { - Documentation.initDomainIndexTable(); - Documentation.initOnKeyListeners(); - }, - - /** - * i18n support - */ - TRANSLATIONS: {}, - PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), - LOCALE: "unknown", - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext: (string) => { - const translated = Documentation.TRANSLATIONS[string]; - switch (typeof translated) { - case "undefined": - return string; // no translation - case "string": - return translated; // translation exists - default: - return translated[0]; // (singular, plural) translation tuple exists - } - }, - - ngettext: (singular, plural, n) => { - const translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated !== "undefined") - return translated[Documentation.PLURAL_EXPR(n)]; - return n === 1 ? 
singular : plural; - }, - - addTranslations: (catalog) => { - Object.assign(Documentation.TRANSLATIONS, catalog.messages); - Documentation.PLURAL_EXPR = new Function( - "n", - `return (${catalog.plural_expr})` - ); - Documentation.LOCALE = catalog.locale; - }, - - /** - * helper function to focus on search bar - */ - focusSearchBar: () => { - document.querySelectorAll("input[name=q]")[0]?.focus(); - }, - - /** - * Initialise the domain index toggle buttons - */ - initDomainIndexTable: () => { - const toggler = (el) => { - const idNumber = el.id.substr(7); - const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); - if (el.src.substr(-9) === "minus.png") { - el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; - toggledRows.forEach((el) => (el.style.display = "none")); - } else { - el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; - toggledRows.forEach((el) => (el.style.display = "")); - } - }; - - const togglerElements = document.querySelectorAll("img.toggler"); - togglerElements.forEach((el) => - el.addEventListener("click", (event) => toggler(event.currentTarget)) - ); - togglerElements.forEach((el) => (el.style.display = "")); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); - }, - - initOnKeyListeners: () => { - // only install a listener if it is really needed - if ( - !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && - !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS - ) - return; - - document.addEventListener("keydown", (event) => { - // bail for input elements - if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; - // bail with special keys - if (event.altKey || event.ctrlKey || event.metaKey) return; - - if (!event.shiftKey) { - switch (event.key) { - case "ArrowLeft": - if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; - - const prevLink = document.querySelector('link[rel="prev"]'); - if (prevLink && prevLink.href) { - window.location.href = prevLink.href; - event.preventDefault(); - } - break; - case "ArrowRight": - if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; - - const nextLink = document.querySelector('link[rel="next"]'); - if (nextLink && nextLink.href) { - window.location.href = nextLink.href; - event.preventDefault(); - } - break; - } - } - - // some keyboard layouts may need Shift to get / - switch (event.key) { - case "/": - if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; - Documentation.focusSearchBar(); - event.preventDefault(); - } - }); - }, -}; - -// quick alias for translations -const _ = Documentation.gettext; - -_ready(Documentation.init); diff --git a/_static/language_data.js b/_static/language_data.js deleted file mode 100644 index 250f5665f..000000000 --- a/_static/language_data.js +++ /dev/null @@ -1,199 +0,0 @@ -/* - * language_data.js - * ~~~~~~~~~~~~~~~~ - * - * This script contains the language-specific data used by searchtools.js, - * namely the list of stopwords, stemmer, scorer and splitter. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; - - -/* Non-minified version is copied as a separate JS file, is available */ - -/** - * Porter Stemmer - */ -var Stemmer = function() { - - var step2list = { - ational: 'ate', - tional: 'tion', - enci: 'ence', - anci: 'ance', - izer: 'ize', - bli: 'ble', - alli: 'al', - entli: 'ent', - eli: 'e', - ousli: 'ous', - ization: 'ize', - ation: 'ate', - ator: 'ate', - alism: 'al', - iveness: 'ive', - fulness: 'ful', - ousness: 'ous', - aliti: 'al', - iviti: 'ive', - biliti: 'ble', - logi: 'log' - }; - - var step3list = { - icate: 'ic', - ative: '', - alize: 'al', - iciti: 'ic', - ical: 'ic', - ful: '', - ness: '' - }; - - var c = "[^aeiou]"; // consonant - var v = "[aeiouy]"; // vowel - var C = c + "[^aeiouy]*"; // consonant sequence - var V = v + "[aeiou]*"; // vowel sequence - - var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 - var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 - var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 - var s_v = "^(" + C + ")?" + v; // vowel in stem - - this.stemWord = function (w) { - var stem; - var suffix; - var firstch; - var origword = w; - - if (w.length < 3) - return w; - - var re; - var re2; - var re3; - var re4; - - firstch = w.substr(0,1); - if (firstch == "y") - w = firstch.toUpperCase() + w.substr(1); - - // Step 1a - re = /^(.+?)(ss|i)es$/; - re2 = /^(.+?)([^s])s$/; - - if (re.test(w)) - w = w.replace(re,"$1$2"); - else if (re2.test(w)) - w = w.replace(re2,"$1$2"); - - // Step 1b - re = /^(.+?)eed$/; - re2 = /^(.+?)(ed|ing)$/; - if (re.test(w)) { - var fp = re.exec(w); - re = new RegExp(mgr0); - if (re.test(fp[1])) { - re = /.$/; - w = w.replace(re,""); - } - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1]; - re2 = new RegExp(s_v); - if (re2.test(stem)) { - w = stem; - re2 = /(at|bl|iz)$/; - re3 = new RegExp("([^aeiouylsz])\\1$"); - re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re2.test(w)) - w = w + "e"; - else if (re3.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - else if (re4.test(w)) - w = w + "e"; - } - } - - // Step 1c - re = /^(.+?)y$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(s_v); - if (re.test(stem)) - w = stem + "i"; - } - - // Step 2 - re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step2list[suffix]; - } - - // Step 3 - re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step3list[suffix]; - } - - // Step 4 - re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; - re2 = /^(.+?)(s|t)(ion)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - if (re.test(stem)) - w = stem; - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1] + fp[2]; - re2 = new RegExp(mgr1); - if (re2.test(stem)) - w = stem; - } - - // Step 5 - re = /^(.+?)e$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - re2 = 
new RegExp(meq1); - re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) - w = stem; - } - re = /ll$/; - re2 = new RegExp(mgr1); - if (re.test(w) && re2.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - - // and turn initial Y back to y - if (firstch == "y") - w = firstch.toLowerCase() + w.substr(1); - return w; - } -} - diff --git a/_static/medias/accel_decel-fx-params.png b/_static/medias/accel_decel-fx-params.png deleted file mode 100644 index cf9dbf9ef..000000000 Binary files a/_static/medias/accel_decel-fx-params.png and /dev/null differ diff --git a/_static/medias/getting_started/explanations.jpeg b/_static/medias/getting_started/explanations.jpeg deleted file mode 100644 index cd71e5876..000000000 Binary files a/_static/medias/getting_started/explanations.jpeg and /dev/null differ diff --git a/_static/medias/getting_started/moviepy_10_minutes/moviepy_10_minutes.zip b/_static/medias/getting_started/moviepy_10_minutes/moviepy_10_minutes.zip deleted file mode 100644 index 1ded32690..000000000 Binary files a/_static/medias/getting_started/moviepy_10_minutes/moviepy_10_minutes.zip and /dev/null differ diff --git a/_static/medias/getting_started/moviepy_10_minutes/trailer_bbb.mp4 b/_static/medias/getting_started/moviepy_10_minutes/trailer_bbb.mp4 deleted file mode 100644 index 84d8ee292..000000000 Binary files a/_static/medias/getting_started/moviepy_10_minutes/trailer_bbb.mp4 and /dev/null differ diff --git a/_static/medias/index_api.svg b/_static/medias/index_api.svg deleted file mode 100644 index 69f7ba1d2..000000000 --- a/_static/medias/index_api.svg +++ /dev/null @@ -1,97 +0,0 @@ - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - diff --git a/_static/medias/index_contribute.svg b/_static/medias/index_contribute.svg deleted file mode 100644 index de3d90237..000000000 --- a/_static/medias/index_contribute.svg +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - image/svg+xml - - - - - - - - - - - - diff --git a/_static/medias/index_getting_started.svg b/_static/medias/index_getting_started.svg deleted file mode 100644 index 2d36622cb..000000000 --- a/_static/medias/index_getting_started.svg +++ /dev/null @@ -1,66 +0,0 @@ - - - - - - - - - - image/svg+xml - - - - - - - - - diff --git a/_static/medias/index_user_guide.svg b/_static/medias/index_user_guide.svg deleted file mode 100644 index bd1705351..000000000 --- a/_static/medias/index_user_guide.svg +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - - - image/svg+xml - - - - - - - - - diff --git a/_static/medias/logo_small.jpeg b/_static/medias/logo_small.jpeg deleted file mode 100644 index da83da04a..000000000 Binary files a/_static/medias/logo_small.jpeg and /dev/null differ diff --git a/_static/medias/user_guide/circle.gif b/_static/medias/user_guide/circle.gif deleted file mode 100644 index c00cbd16d..000000000 Binary files a/_static/medias/user_guide/circle.gif and /dev/null differ diff --git a/_static/medias/user_guide/stacked.jpeg b/_static/medias/user_guide/stacked.jpeg deleted file mode 100644 index 7be3748a6..000000000 Binary files a/_static/medias/user_guide/stacked.jpeg and /dev/null differ diff --git a/_static/medias/user_guide/videoWH.jpeg b/_static/medias/user_guide/videoWH.jpeg deleted file mode 100644 index cf5c339e2..000000000 Binary files a/_static/medias/user_guide/videoWH.jpeg and /dev/null differ diff --git a/_static/moviepy.css b/_static/moviepy.css deleted file mode 100644 index deb697ef7..000000000 --- a/_static/moviepy.css +++ /dev/null @@ 
-1,61 +0,0 @@ -@import url(flasky.css) -/* Override some aspects of the pydata-sphinx-theme */ - -.indexwrapper .sphinxsidebar { visibility: hidden; } - -.logo img.logo { width: 120px; height: 120px; padding-right: 30px; } - -div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 - { font-family: 'Times New Roman', 'Garamond', 'Georgia', serif; } - - -:root { - /* Use softer blue from bootstrap's default info color */ - --pst-color-info: 23, 162, 184; -} - -table { - width: auto; /* Override fit-content which breaks Styler user guide ipynb */ -} - -/* Main index page overview cards */ - -.intro-card { - padding: 30px 10px 20px 10px; -} - -.intro-card .sd-card-img-top { - margin: 10px; - height: 52px; - background: none !important; -} - -.intro-card .sd-card-title { - color: var(--pst-color-primary); - font-size: var(--pst-font-size-h5); - padding: 1rem 0rem 0.5rem 0rem; -} - -.intro-card .sd-card-footer { - border: none !important; -} - -.intro-card .sd-card-footer p.sd-card-text { - max-width: 220px; - margin-left: auto; - margin-right: auto; -} - -.intro-card .sd-btn-secondary { - background-color: #6c757d !important; - border-color: #6c757d !important; -} - -.intro-card .sd-btn-secondary:hover { - background-color: #5a6268 !important; - border-color: #545b62 !important; -} - -.card, .card img { - background-color: var(--pst-color-background); -} diff --git a/_static/pygments.css b/_static/pygments.css deleted file mode 100644 index 012e6a00a..000000000 --- a/_static/pygments.css +++ /dev/null @@ -1,152 +0,0 @@ -html[data-theme="light"] .highlight pre { line-height: 125%; } -html[data-theme="light"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } -html[data-theme="light"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } -html[data-theme="light"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } -html[data-theme="light"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } -html[data-theme="light"] .highlight .hll { background-color: #fae4c2 } -html[data-theme="light"] .highlight { background: #fefefe; color: #080808 } -html[data-theme="light"] .highlight .c { color: #515151 } /* Comment */ -html[data-theme="light"] .highlight .err { color: #a12236 } /* Error */ -html[data-theme="light"] .highlight .k { color: #6730c5 } /* Keyword */ -html[data-theme="light"] .highlight .l { color: #7f4707 } /* Literal */ -html[data-theme="light"] .highlight .n { color: #080808 } /* Name */ -html[data-theme="light"] .highlight .o { color: #00622f } /* Operator */ -html[data-theme="light"] .highlight .p { color: #080808 } /* Punctuation */ -html[data-theme="light"] .highlight .ch { color: #515151 } /* Comment.Hashbang */ -html[data-theme="light"] .highlight .cm { color: #515151 } /* Comment.Multiline */ -html[data-theme="light"] .highlight .cp { color: #515151 } /* Comment.Preproc */ -html[data-theme="light"] .highlight .cpf { color: #515151 } /* Comment.PreprocFile */ -html[data-theme="light"] .highlight .c1 { color: #515151 } /* Comment.Single */ -html[data-theme="light"] .highlight .cs { color: #515151 } /* Comment.Special */ -html[data-theme="light"] .highlight .gd { color: #005b82 } /* Generic.Deleted */ -html[data-theme="light"] .highlight .ge { font-style: italic } /* Generic.Emph */ -html[data-theme="light"] 
.highlight .gh { color: #005b82 } /* Generic.Heading */
-html[data-theme="light"] .highlight .gs { font-weight: bold } /* Generic.Strong */
-html[data-theme="light"] .highlight .gu { color: #005b82 } /* Generic.Subheading */
-html[data-theme="light"] .highlight .kc { color: #6730c5 } /* Keyword.Constant */
-html[data-theme="light"] .highlight .kd { color: #6730c5 } /* Keyword.Declaration */
-html[data-theme="light"] .highlight .kn { color: #6730c5 } /* Keyword.Namespace */
-html[data-theme="light"] .highlight .kp { color: #6730c5 } /* Keyword.Pseudo */
-html[data-theme="light"] .highlight .kr { color: #6730c5 } /* Keyword.Reserved */
-html[data-theme="light"] .highlight .kt { color: #7f4707 } /* Keyword.Type */
-html[data-theme="light"] .highlight .ld { color: #7f4707 } /* Literal.Date */
-html[data-theme="light"] .highlight .m { color: #7f4707 } /* Literal.Number */
-html[data-theme="light"] .highlight .s { color: #00622f } /* Literal.String */
-html[data-theme="light"] .highlight .na { color: #912583 } /* Name.Attribute */
-html[data-theme="light"] .highlight .nb { color: #7f4707 } /* Name.Builtin */
-html[data-theme="light"] .highlight .nc { color: #005b82 } /* Name.Class */
-html[data-theme="light"] .highlight .no { color: #005b82 } /* Name.Constant */
-html[data-theme="light"] .highlight .nd { color: #7f4707 } /* Name.Decorator */
-html[data-theme="light"] .highlight .ni { color: #00622f } /* Name.Entity */
-html[data-theme="light"] .highlight .ne { color: #6730c5 } /* Name.Exception */
-html[data-theme="light"] .highlight .nf { color: #005b82 } /* Name.Function */
-html[data-theme="light"] .highlight .nl { color: #7f4707 } /* Name.Label */
-html[data-theme="light"] .highlight .nn { color: #080808 } /* Name.Namespace */
-html[data-theme="light"] .highlight .nx { color: #080808 } /* Name.Other */
-html[data-theme="light"] .highlight .py { color: #005b82 } /* Name.Property */
-html[data-theme="light"] .highlight .nt { color: #005b82 } /* Name.Tag */
-html[data-theme="light"] .highlight .nv { color: #a12236 } /* Name.Variable */
-html[data-theme="light"] .highlight .ow { color: #6730c5 } /* Operator.Word */
-html[data-theme="light"] .highlight .pm { color: #080808 } /* Punctuation.Marker */
-html[data-theme="light"] .highlight .w { color: #080808 } /* Text.Whitespace */
-html[data-theme="light"] .highlight .mb { color: #7f4707 } /* Literal.Number.Bin */
-html[data-theme="light"] .highlight .mf { color: #7f4707 } /* Literal.Number.Float */
-html[data-theme="light"] .highlight .mh { color: #7f4707 } /* Literal.Number.Hex */
-html[data-theme="light"] .highlight .mi { color: #7f4707 } /* Literal.Number.Integer */
-html[data-theme="light"] .highlight .mo { color: #7f4707 } /* Literal.Number.Oct */
-html[data-theme="light"] .highlight .sa { color: #00622f } /* Literal.String.Affix */
-html[data-theme="light"] .highlight .sb { color: #00622f } /* Literal.String.Backtick */
-html[data-theme="light"] .highlight .sc { color: #00622f } /* Literal.String.Char */
-html[data-theme="light"] .highlight .dl { color: #00622f } /* Literal.String.Delimiter */
-html[data-theme="light"] .highlight .sd { color: #00622f } /* Literal.String.Doc */
-html[data-theme="light"] .highlight .s2 { color: #00622f } /* Literal.String.Double */
-html[data-theme="light"] .highlight .se { color: #00622f } /* Literal.String.Escape */
-html[data-theme="light"] .highlight .sh { color: #00622f } /* Literal.String.Heredoc */
-html[data-theme="light"] .highlight .si { color: #00622f } /* Literal.String.Interpol */
-html[data-theme="light"] .highlight .sx { color: #00622f } /* Literal.String.Other */
-html[data-theme="light"] .highlight .sr { color: #a12236 } /* Literal.String.Regex */
-html[data-theme="light"] .highlight .s1 { color: #00622f } /* Literal.String.Single */
-html[data-theme="light"] .highlight .ss { color: #005b82 } /* Literal.String.Symbol */
-html[data-theme="light"] .highlight .bp { color: #7f4707 } /* Name.Builtin.Pseudo */
-html[data-theme="light"] .highlight .fm { color: #005b82 } /* Name.Function.Magic */
-html[data-theme="light"] .highlight .vc { color: #a12236 } /* Name.Variable.Class */
-html[data-theme="light"] .highlight .vg { color: #a12236 } /* Name.Variable.Global */
-html[data-theme="light"] .highlight .vi { color: #a12236 } /* Name.Variable.Instance */
-html[data-theme="light"] .highlight .vm { color: #7f4707 } /* Name.Variable.Magic */
-html[data-theme="light"] .highlight .il { color: #7f4707 } /* Literal.Number.Integer.Long */
-html[data-theme="dark"] .highlight pre { line-height: 125%; }
-html[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
-html[data-theme="dark"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
-html[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
-html[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
-html[data-theme="dark"] .highlight .hll { background-color: #ffd9002e }
-html[data-theme="dark"] .highlight { background: #2b2b2b; color: #f8f8f2 }
-html[data-theme="dark"] .highlight .c { color: #ffd900 } /* Comment */
-html[data-theme="dark"] .highlight .err { color: #ffa07a } /* Error */
-html[data-theme="dark"] .highlight .k { color: #dcc6e0 } /* Keyword */
-html[data-theme="dark"] .highlight .l { color: #ffd900 } /* Literal */
-html[data-theme="dark"] .highlight .n { color: #f8f8f2 } /* Name */
-html[data-theme="dark"] .highlight .o { color: #abe338 } /* Operator */
-html[data-theme="dark"] .highlight .p { color: #f8f8f2 } /* Punctuation */
-html[data-theme="dark"] .highlight .ch { color: #ffd900 } /* Comment.Hashbang */
-html[data-theme="dark"] .highlight .cm { color: #ffd900 } /* Comment.Multiline */
-html[data-theme="dark"] .highlight .cp { color: #ffd900 } /* Comment.Preproc */
-html[data-theme="dark"] .highlight .cpf { color: #ffd900 } /* Comment.PreprocFile */
-html[data-theme="dark"] .highlight .c1 { color: #ffd900 } /* Comment.Single */
-html[data-theme="dark"] .highlight .cs { color: #ffd900 } /* Comment.Special */
-html[data-theme="dark"] .highlight .gd { color: #00e0e0 } /* Generic.Deleted */
-html[data-theme="dark"] .highlight .ge { font-style: italic } /* Generic.Emph */
-html[data-theme="dark"] .highlight .gh { color: #00e0e0 } /* Generic.Heading */
-html[data-theme="dark"] .highlight .gs { font-weight: bold } /* Generic.Strong */
-html[data-theme="dark"] .highlight .gu { color: #00e0e0 } /* Generic.Subheading */
-html[data-theme="dark"] .highlight .kc { color: #dcc6e0 } /* Keyword.Constant */
-html[data-theme="dark"] .highlight .kd { color: #dcc6e0 } /* Keyword.Declaration */
-html[data-theme="dark"] .highlight .kn { color: #dcc6e0 } /* Keyword.Namespace */
-html[data-theme="dark"] .highlight .kp { color: #dcc6e0 } /* Keyword.Pseudo */
-html[data-theme="dark"] .highlight .kr { color: #dcc6e0 } /* Keyword.Reserved */
-html[data-theme="dark"] .highlight .kt { color: #ffd900 } /* Keyword.Type */
-html[data-theme="dark"] .highlight .ld { color: #ffd900 } /* Literal.Date */
-html[data-theme="dark"] .highlight .m { color: #ffd900 } /* Literal.Number */
-html[data-theme="dark"] .highlight .s { color: #abe338 } /* Literal.String */
-html[data-theme="dark"] .highlight .na { color: #ffd900 } /* Name.Attribute */
-html[data-theme="dark"] .highlight .nb { color: #ffd900 } /* Name.Builtin */
-html[data-theme="dark"] .highlight .nc { color: #00e0e0 } /* Name.Class */
-html[data-theme="dark"] .highlight .no { color: #00e0e0 } /* Name.Constant */
-html[data-theme="dark"] .highlight .nd { color: #ffd900 } /* Name.Decorator */
-html[data-theme="dark"] .highlight .ni { color: #abe338 } /* Name.Entity */
-html[data-theme="dark"] .highlight .ne { color: #dcc6e0 } /* Name.Exception */
-html[data-theme="dark"] .highlight .nf { color: #00e0e0 } /* Name.Function */
-html[data-theme="dark"] .highlight .nl { color: #ffd900 } /* Name.Label */
-html[data-theme="dark"] .highlight .nn { color: #f8f8f2 } /* Name.Namespace */
-html[data-theme="dark"] .highlight .nx { color: #f8f8f2 } /* Name.Other */
-html[data-theme="dark"] .highlight .py { color: #00e0e0 } /* Name.Property */
-html[data-theme="dark"] .highlight .nt { color: #00e0e0 } /* Name.Tag */
-html[data-theme="dark"] .highlight .nv { color: #ffa07a } /* Name.Variable */
-html[data-theme="dark"] .highlight .ow { color: #dcc6e0 } /* Operator.Word */
-html[data-theme="dark"] .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */
-html[data-theme="dark"] .highlight .w { color: #f8f8f2 } /* Text.Whitespace */
-html[data-theme="dark"] .highlight .mb { color: #ffd900 } /* Literal.Number.Bin */
-html[data-theme="dark"] .highlight .mf { color: #ffd900 } /* Literal.Number.Float */
-html[data-theme="dark"] .highlight .mh { color: #ffd900 } /* Literal.Number.Hex */
-html[data-theme="dark"] .highlight .mi { color: #ffd900 } /* Literal.Number.Integer */
-html[data-theme="dark"] .highlight .mo { color: #ffd900 } /* Literal.Number.Oct */
-html[data-theme="dark"] .highlight .sa { color: #abe338 } /* Literal.String.Affix */
-html[data-theme="dark"] .highlight .sb { color: #abe338 } /* Literal.String.Backtick */
-html[data-theme="dark"] .highlight .sc { color: #abe338 } /* Literal.String.Char */
-html[data-theme="dark"] .highlight .dl { color: #abe338 } /* Literal.String.Delimiter */
-html[data-theme="dark"] .highlight .sd { color: #abe338 } /* Literal.String.Doc */
-html[data-theme="dark"] .highlight .s2 { color: #abe338 } /* Literal.String.Double */
-html[data-theme="dark"] .highlight .se { color: #abe338 } /* Literal.String.Escape */
-html[data-theme="dark"] .highlight .sh { color: #abe338 } /* Literal.String.Heredoc */
-html[data-theme="dark"] .highlight .si { color: #abe338 } /* Literal.String.Interpol */
-html[data-theme="dark"] .highlight .sx { color: #abe338 } /* Literal.String.Other */
-html[data-theme="dark"] .highlight .sr { color: #ffa07a } /* Literal.String.Regex */
-html[data-theme="dark"] .highlight .s1 { color: #abe338 } /* Literal.String.Single */
-html[data-theme="dark"] .highlight .ss { color: #00e0e0 } /* Literal.String.Symbol */
-html[data-theme="dark"] .highlight .bp { color: #ffd900 } /* Name.Builtin.Pseudo */
-html[data-theme="dark"] .highlight .fm { color: #00e0e0 } /* Name.Function.Magic */
-html[data-theme="dark"] .highlight .vc { color: #ffa07a } /* Name.Variable.Class */
-html[data-theme="dark"] .highlight .vg { color: #ffa07a } /* Name.Variable.Global */
-html[data-theme="dark"] .highlight .vi { color: #ffa07a } /* Name.Variable.Instance */
-html[data-theme="dark"] .highlight .vm { color: #ffd900 } /* Name.Variable.Magic */
-html[data-theme="dark"] .highlight .il { color: #ffd900 } /* Literal.Number.Integer.Long */
\ No newline at end of file
diff --git a/_static/scripts/bootstrap.js b/_static/scripts/bootstrap.js
deleted file mode 100644
index 05b34741c..000000000
--- a/_static/scripts/bootstrap.js
+++ /dev/null
@@ -1,7 +0,0 @@
-!function(t){var e={};function i(n){if(e[n])return e[n].exports;var s=e[n]={i:n,l:!1,exports:{}};return t[n].call(s.exports,s,s.exports,i),s.l=!0,s.exports}i.m=t,i.c=e,i.d=function(t,e,n){i.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:n})},i.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},i.t=function(t,e){if(1&e&&(t=i(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var n=Object.create(null);if(i.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var s in t)i.d(n,s,function(e){return t[e]}.bind(null,s));return n},i.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return i.d(e,"a",e),e},i.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},i.p="",i(i.s=1)}([function(t,e,i){"use strict";function n(t){"loading"!=document.readyState?t():document.addEventListener("DOMContentLoaded",t)}i.d(e,"a",(function(){return n}))},function(t,e,i){"use strict";i.r(e);var n={};i.r(n),i.d(n,"top",(function(){return s})),i.d(n,"bottom",(function(){return o})),i.d(n,"right",(function(){return r})),i.d(n,"left",(function(){return a})),i.d(n,"auto",(function(){return l})),i.d(n,"basePlacements",(function(){return c})),i.d(n,"start",(function(){return u})),i.d(n,"end",(function(){return h})),i.d(n,"clippingParents",(function(){return d})),i.d(n,"viewport",(function(){return f})),i.d(n,"popper",(function(){return p})),i.d(n,"reference",(function(){return g})),i.d(n,"variationPlacements",(function(){return m})),i.d(n,"placements",(function(){return _})),i.d(n,"beforeRead",(function(){return b})),i.d(n,"read",(function(){return v})),i.d(n,"afterRead",(function(){return y})),i.d(n,"beforeMain",(function(){return w})),i.d(n,"main",(function(){return A})),i.d(n,"afterMain",(function(){return E})),i.d(n,"beforeWrite",(function(){return C})),i.d(n,"write",(function(){return T})),i.d(n,"afterWrite",(function(){return O})),i.d(n,"modifierPhases",(function(){return x})),i.d(n,"applyStyles",(function(){return N})),i.d(n,"arrow",(function(){return Z})),i.d(n,"computeStyles",(function(){return nt})),i.d(n,"eventListeners",(function(){return ot})),i.d(n,"flip",(function(){return vt})),i.d(n,"hide",(function(){return At})),i.d(n,"offset",(function(){return Et})),i.d(n,"popperOffsets",(function(){return Ct})),i.d(n,"preventOverflow",(function(){return Tt})),i.d(n,"popperGenerator",(function(){return Dt})),i.d(n,"detectOverflow",(function(){return bt})),i.d(n,"createPopperBase",(function(){return St})),i.d(n,"createPopper",(function(){return It})),i.d(n,"createPopperLite",(function(){return Nt}));var s="top",o="bottom",r="right",a="left",l="auto",c=[s,o,r,a],u="start",h="end",d="clippingParents",f="viewport",p="popper",g="reference",m=c.reduce((function(t,e){return t.concat([e+"-"+u,e+"-"+h])}),[]),_=[].concat(c,[l]).reduce((function(t,e){return
t.concat([e,e+"-"+u,e+"-"+h])}),[]),b="beforeRead",v="read",y="afterRead",w="beforeMain",A="main",E="afterMain",C="beforeWrite",T="write",O="afterWrite",x=[b,v,y,w,A,E,C,T,O];function k(t){return t?(t.nodeName||"").toLowerCase():null}function L(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function D(t){return t instanceof L(t).Element||t instanceof Element}function S(t){return t instanceof L(t).HTMLElement||t instanceof HTMLElement}function I(t){return"undefined"!=typeof ShadowRoot&&(t instanceof L(t).ShadowRoot||t instanceof ShadowRoot)}var N={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];S(s)&&k(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});S(n)&&k(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function P(t){return t.split("-")[0]}var j=Math.max,M=Math.min,H=Math.round;function W(){var t=navigator.userAgentData;return null!=t&&t.brands?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function F(){return!/^((?!chrome|android).)*safari/i.test(W())}function B(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var n=t.getBoundingClientRect(),s=1,o=1;e&&S(t)&&(s=t.offsetWidth>0&&H(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&H(n.height)/t.offsetHeight||1);var r=(D(t)?L(t):window).visualViewport,a=!F()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,u=n.width/s,h=n.height/o;return{width:u,height:h,top:c,right:l+u,bottom:c+h,left:l,x:l,y:c}}function z(t){var e=B(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function q(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&I(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function R(t){return L(t).getComputedStyle(t)}function V(t){return["table","td","th"].indexOf(k(t))>=0}function $(t){return((D(t)?t.ownerDocument:t.document)||window.document).documentElement}function K(t){return"html"===k(t)?t:t.assignedSlot||t.parentNode||(I(t)?t.host:null)||$(t)}function Q(t){return S(t)&&"fixed"!==R(t).position?t.offsetParent:null}function X(t){for(var e=L(t),i=Q(t);i&&V(i)&&"static"===R(i).position;)i=Q(i);return i&&("html"===k(i)||"body"===k(i)&&"static"===R(i).position)?e:i||function(t){var e=/firefox/i.test(W());if(/Trident/i.test(W())&&S(t)&&"fixed"===R(t).position)return null;var i=K(t);for(I(i)&&(i=i.host);S(i)&&["html","body"].indexOf(k(i))<0;){var n=R(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return 
null}(t)||e}function Y(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function U(t,e,i){return j(t,M(e,i))}function G(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function J(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}var Z={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,n=t.name,l=t.options,u=i.elements.arrow,h=i.modifiersData.popperOffsets,d=P(i.placement),f=Y(d),p=[a,r].indexOf(d)>=0?"height":"width";if(u&&h){var g=function(t,e){return G("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:J(t,c))}(l.padding,i),m=z(u),_="y"===f?s:a,b="y"===f?o:r,v=i.rects.reference[p]+i.rects.reference[f]-h[f]-i.rects.popper[p],y=h[f]-i.rects.reference[f],w=X(u),A=w?"y"===f?w.clientHeight||0:w.clientWidth||0:0,E=v/2-y/2,C=g[_],T=A-m[p]-g[b],O=A/2-m[p]/2+E,x=U(C,O,T),k=f;i.modifiersData[n]=((e={})[k]=x,e.centerOffset=x-O,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&q(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function tt(t){return t.split("-")[1]}var et={top:"auto",right:"auto",bottom:"auto",left:"auto"};function it(t){var e,i=t.popper,n=t.popperRect,l=t.placement,c=t.variation,u=t.offsets,d=t.position,f=t.gpuAcceleration,p=t.adaptive,g=t.roundOffsets,m=t.isFixed,_=u.x,b=void 0===_?0:_,v=u.y,y=void 0===v?0:v,w="function"==typeof g?g({x:b,y:y}):{x:b,y:y};b=w.x,y=w.y;var A=u.hasOwnProperty("x"),E=u.hasOwnProperty("y"),C=a,T=s,O=window;if(p){var x=X(i),k="clientHeight",D="clientWidth";if(x===L(i)&&"static"!==R(x=$(i)).position&&"absolute"===d&&(k="scrollHeight",D="scrollWidth"),x=x,l===s||(l===a||l===r)&&c===h)T=o,y-=(m&&x===O&&O.visualViewport?O.visualViewport.height:x[k])-n.height,y*=f?1:-1;if(l===a||(l===s||l===o)&&c===h)C=r,b-=(m&&x===O&&O.visualViewport?O.visualViewport.width:x[D])-n.width,b*=f?1:-1}var S,I=Object.assign({position:d},p&&et),N=!0===g?function(t){var e=t.x,i=t.y,n=window.devicePixelRatio||1;return{x:H(e*n)/n||0,y:H(i*n)/n||0}}({x:b,y:y}):{x:b,y:y};return b=N.x,y=N.y,f?Object.assign({},I,((S={})[T]=E?"0":"",S[C]=A?"0":"",S.transform=(O.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",S)):Object.assign({},I,((e={})[T]=E?y+"px":"",e[C]=A?b+"px":"",e.transform="",e))}var nt={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:P(e.placement),variation:tt(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,it(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,it(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}},st={passive:!0};var ot={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 
0===r||r,l=L(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,st)})),a&&l.addEventListener("resize",i.update,st),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,st)})),a&&l.removeEventListener("resize",i.update,st)}},data:{}},rt={left:"right",right:"left",bottom:"top",top:"bottom"};function at(t){return t.replace(/left|right|bottom|top/g,(function(t){return rt[t]}))}var lt={start:"end",end:"start"};function ct(t){return t.replace(/start|end/g,(function(t){return lt[t]}))}function ut(t){var e=L(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ht(t){return B($(t)).left+ut(t).scrollLeft}function dt(t){var e=R(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function ft(t,e){var i;void 0===e&&(e=[]);var n=function t(e){return["html","body","#document"].indexOf(k(e))>=0?e.ownerDocument.body:S(e)&&dt(e)?e:t(K(e))}(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=L(n),r=s?[o].concat(o.visualViewport||[],dt(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(ft(K(r)))}function pt(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function gt(t,e,i){return e===f?pt(function(t,e){var i=L(t),n=$(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=F();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+ht(t),y:l}}(t,i)):D(e)?function(t,e){var i=B(t,!1,"fixed"===e);return i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):pt(function(t){var e,i=$(t),n=ut(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=j(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=j(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ht(t),l=-n.scrollTop;return"rtl"===R(s||i).direction&&(a+=j(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}($(t)))}function mt(t,e,i,n){var s="clippingParents"===e?function(t){var e=ft(K(t)),i=["absolute","fixed"].indexOf(R(t).position)>=0&&S(t)?X(t):t;return D(i)?e.filter((function(t){return D(t)&&q(t,i)&&"body"!==k(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=gt(t,i,n);return e.top=j(s.top,e.top),e.right=M(s.right,e.right),e.bottom=M(s.bottom,e.bottom),e.left=j(s.left,e.left),e}),gt(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}function _t(t){var e,i=t.reference,n=t.element,l=t.placement,c=l?P(l):null,d=l?tt(l):null,f=i.x+i.width/2-n.width/2,p=i.y+i.height/2-n.height/2;switch(c){case s:e={x:f,y:i.y-n.height};break;case o:e={x:f,y:i.y+i.height};break;case r:e={x:i.x+i.width,y:p};break;case a:e={x:i.x-n.width,y:p};break;default:e={x:i.x,y:i.y}}var g=c?Y(c):null;if(null!=g){var m="y"===g?"height":"width";switch(d){case u:e[g]=e[g]-(i[m]/2-n[m]/2);break;case h:e[g]=e[g]+(i[m]/2-n[m]/2)}}return e}function bt(t,e){void 0===e&&(e={});var i=e,n=i.placement,a=void 0===n?t.placement:n,l=i.strategy,u=void 0===l?t.strategy:l,h=i.boundary,m=void 0===h?d:h,_=i.rootBoundary,b=void 0===_?f:_,v=i.elementContext,y=void 0===v?p:v,w=i.altBoundary,A=void 0!==w&&w,E=i.padding,C=void 0===E?0:E,T=G("number"!=typeof 
C?C:J(C,c)),O=y===p?g:p,x=t.rects.popper,k=t.elements[A?O:y],L=mt(D(k)?k:k.contextElement||$(t.elements.popper),m,b,u),S=B(t.elements.reference),I=_t({reference:S,element:x,strategy:"absolute",placement:a}),N=pt(Object.assign({},x,I)),P=y===p?N:S,j={top:L.top-P.top+T.top,bottom:P.bottom-L.bottom+T.bottom,left:L.left-P.left+T.left,right:P.right-L.right+T.right},M=t.modifiersData.offset;if(y===p&&M){var H=M[a];Object.keys(j).forEach((function(t){var e=[r,o].indexOf(t)>=0?1:-1,i=[s,o].indexOf(t)>=0?"y":"x";j[t]+=H[i]*e}))}return j}var vt={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name;if(!e.modifiersData[n]._skip){for(var h=i.mainAxis,d=void 0===h||h,f=i.altAxis,p=void 0===f||f,g=i.fallbackPlacements,b=i.padding,v=i.boundary,y=i.rootBoundary,w=i.altBoundary,A=i.flipVariations,E=void 0===A||A,C=i.allowedAutoPlacements,T=e.options.placement,O=P(T),x=g||(O===T||!E?[at(T)]:function(t){if(P(t)===l)return[];var e=at(t);return[ct(t),e,ct(e)]}(T)),k=[T].concat(x).reduce((function(t,i){return t.concat(P(i)===l?function(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,l=i.allowedAutoPlacements,u=void 0===l?_:l,h=tt(n),d=h?a?m:m.filter((function(t){return tt(t)===h})):c,f=d.filter((function(t){return u.indexOf(t)>=0}));0===f.length&&(f=d);var p=f.reduce((function(e,i){return e[i]=bt(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[P(i)],e}),{});return Object.keys(p).sort((function(t,e){return p[t]-p[e]}))}(e,{placement:i,boundary:v,rootBoundary:y,padding:b,flipVariations:E,allowedAutoPlacements:C}):i)}),[]),L=e.rects.reference,D=e.rects.popper,S=new Map,I=!0,N=k[0],j=0;j<k.length;j++){var M=k[j],H=P(M),W=tt(M)===u,F=[s,o].indexOf(H)>=0,B=F?"width":"height",z=bt(e,{placement:M,boundary:v,rootBoundary:y,altBoundary:w,padding:b}),q=F?W?r:a:W?o:s;L[B]>D[B]&&(q=at(q));var R=at(q),V=[];if(d&&V.push(z[H]<=0),p&&V.push(z[q]<=0,z[R]<=0),V.every((function(t){return t}))){N=M,I=!1;break}S.set(M,V)}if(I)for(var $=function(t){var e=k.find((function(e){var i=S.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return N=e,"break"},K=E?3:1;K>0;K--){if("break"===$(K))break}e.placement!==N&&(e.modifiersData[n]._skip=!0,e.placement=N,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function yt(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function wt(t){return[s,r,o,a].some((function(e){return t[e]>=0}))}var At={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=bt(e,{elementContext:"reference"}),a=bt(e,{altBoundary:!0}),l=yt(r,n),c=yt(a,s,o),u=wt(l),h=wt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:u,hasPopperEscaped:h},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":u,"data-popper-escaped":h})}};var Et={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,n=t.name,o=i.offset,l=void 0===o?[0,0]:o,c=_.reduce((function(t,i){return t[i]=function(t,e,i){var n=P(t),o=[a,s].indexOf(n)>=0?-1:1,l="function"==typeof i?i(Object.assign({},e,{placement:t})):i,c=l[0],u=l[1];return
c=c||0,u=(u||0)*o,[a,r].indexOf(n)>=0?{x:u,y:c}:{x:c,y:u}}(i,e.rects,l),t}),{}),u=c[e.placement],h=u.x,d=u.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=h,e.modifiersData.popperOffsets.y+=d),e.modifiersData[n]=c}};var Ct={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=_t({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}};var Tt={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,n=t.name,l=i.mainAxis,c=void 0===l||l,h=i.altAxis,d=void 0!==h&&h,f=i.boundary,p=i.rootBoundary,g=i.altBoundary,m=i.padding,_=i.tether,b=void 0===_||_,v=i.tetherOffset,y=void 0===v?0:v,w=bt(e,{boundary:f,rootBoundary:p,padding:m,altBoundary:g}),A=P(e.placement),E=tt(e.placement),C=!E,T=Y(A),O="x"===T?"y":"x",x=e.modifiersData.popperOffsets,k=e.rects.reference,L=e.rects.popper,D="function"==typeof y?y(Object.assign({},e.rects,{placement:e.placement})):y,S="number"==typeof D?{mainAxis:D,altAxis:D}:Object.assign({mainAxis:0,altAxis:0},D),I=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,N={x:0,y:0};if(x){if(c){var H,W="y"===T?s:a,F="y"===T?o:r,B="y"===T?"height":"width",q=x[T],R=q+w[W],V=q-w[F],$=b?-L[B]/2:0,K=E===u?k[B]:L[B],Q=E===u?-L[B]:-k[B],G=e.elements.arrow,J=b&&G?z(G):{width:0,height:0},Z=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},et=Z[W],it=Z[F],nt=U(0,k[B],J[B]),st=C?k[B]/2-$-nt-et-S.mainAxis:K-nt-et-S.mainAxis,ot=C?-k[B]/2+$+nt+it+S.mainAxis:Q+nt+it+S.mainAxis,rt=e.elements.arrow&&X(e.elements.arrow),at=rt?"y"===T?rt.clientTop||0:rt.clientLeft||0:0,lt=null!=(H=null==I?void 0:I[T])?H:0,ct=q+ot-lt,ut=U(b?M(R,q+st-lt-at):R,q,b?j(V,ct):V);x[T]=ut,N[T]=ut-q}if(d){var ht,dt="x"===T?s:a,ft="x"===T?o:r,pt=x[O],gt="y"===O?"height":"width",mt=pt+w[dt],_t=pt-w[ft],vt=-1!==[s,a].indexOf(A),yt=null!=(ht=null==I?void 0:I[O])?ht:0,wt=vt?mt:pt-k[gt]-L[gt]-yt+S.altAxis,At=vt?pt+k[gt]+L[gt]-yt-S.altAxis:_t,Et=b&&vt?function(t,e,i){var n=U(t,e,i);return n>i?i:n}(wt,pt,At):U(b?wt:mt,pt,b?At:_t);x[O]=Et,N[O]=Et-pt}e.modifiersData[n]=N}},requiresIfExists:["offset"]};function Ot(t,e,i){void 0===i&&(i=!1);var n,s,o=S(e),r=S(e)&&function(t){var e=t.getBoundingClientRect(),i=H(e.width)/t.offsetWidth||1,n=H(e.height)/t.offsetHeight||1;return 1!==i||1!==n}(e),a=$(e),l=B(t,r,i),c={scrollLeft:0,scrollTop:0},u={x:0,y:0};return(o||!o&&!i)&&(("body"!==k(e)||dt(a))&&(c=(n=e)!==L(n)&&S(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:ut(n)),S(e)?((u=B(e,!0)).x+=e.clientLeft,u.y+=e.clientTop):a&&(u.x=ht(a))),{x:l.left+c.scrollLeft-u.x,y:l.top+c.scrollTop-u.y,width:l.width,height:l.height}}function xt(t){var e=new Map,i=new Set,n=[];return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||function t(s){i.add(s.name),[].concat(s.requires||[],s.requiresIfExists||[]).forEach((function(n){if(!i.has(n)){var s=e.get(n);s&&t(s)}})),n.push(s)}(t)})),n}var kt={placement:"bottom",modifiers:[],strategy:"absolute"};function Lt(){for(var t=arguments.length,e=new Array(t),i=0;i{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i="#"+i.split("#")[1]),e=i&&"#"!==i?i.trim():null}return e},jt=t=>{const e=Pt(t);return e&&document.querySelector(e)?e:null},Mt=t=>{const e=Pt(t);return e?document.querySelector(e):null},Ht=t=>{t.dispatchEvent(new 
Event("transitionend"))},Wt=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),Ft=t=>Wt(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(t):null,Bt=t=>{if(!Wt(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},zt=t=>!t||t.nodeType!==Node.ELEMENT_NODE||(!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled"))),qt=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?qt(t.parentNode):null},Rt=()=>{},Vt=t=>{t.offsetHeight},$t=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,Kt=[],Qt=()=>"rtl"===document.documentElement.dir,Xt=t=>{var e;e=()=>{const e=$t();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(Kt.length||document.addEventListener("DOMContentLoaded",()=>{for(const t of Kt)t()}),Kt.push(e)):e()},Yt=t=>{"function"==typeof t&&t()},Ut=(t,e,i=!0)=>{if(!i)return void Yt(t);const n=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let s=!1;const o=({target:i})=>{i===e&&(s=!0,e.removeEventListener("transitionend",o),Yt(t))};e.addEventListener("transitionend",o),setTimeout(()=>{s||Ht(e)},n)},Gt=(t,e,i,n)=>{const s=t.length;let o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},Jt=/[^.]*(?=\..*)\.|.*/,Zt=/\..*/,te=/::\d+$/,ee={};let ie=1;const ne={mouseenter:"mouseover",mouseleave:"mouseout"},se=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function oe(t,e){return e&&`${e}::${ie++}`||t.uidEvent||ie++}function re(t){const e=oe(t);return t.uidEvent=e,ee[e]=ee[e]||{},ee[e]}function ae(t,e,i=null){return Object.values(t).find(t=>t.callable===e&&t.delegationSelector===i)}function le(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=de(t);return se.has(o)||(o=t),[n,s,o]}function ce(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=le(e,i,n);if(e in ne){r=(t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)})(r)}const l=re(t),c=l[a]||(l[a]={}),u=ae(c,r,o?i:null);if(u)return void(u.oneOff=u.oneOff&&s);const h=oe(r,e.replace(Jt,"")),d=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return 
pe(s,{delegateTarget:r}),n.oneOff&&fe.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return pe(n,{delegateTarget:t}),i.oneOff&&fe.off(t,n.type,e),e.apply(t,[n])}}(t,r);d.delegationSelector=o?i:null,d.callable=r,d.oneOff=s,d.uidEvent=h,c[h]=d,t.addEventListener(a,d,o)}function ue(t,e,i,n,s){const o=ae(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function he(t,e,i,n){const s=e[i]||{};for(const o of Object.keys(s))if(o.includes(n)){const n=s[o];ue(t,e,i,n.callable,n.delegationSelector)}}function de(t){return t=t.replace(Zt,""),ne[t]||t}const fe={on(t,e,i,n){ce(t,e,i,n,!1)},one(t,e,i,n){ce(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=le(e,i,n),a=r!==e,l=re(t),c=l[r]||{},u=e.startsWith(".");if(void 0===o){if(u)for(const i of Object.keys(l))he(t,l,i,e.slice(1));for(const i of Object.keys(c)){const n=i.replace(te,"");if(!a||e.includes(n)){const e=c[i];ue(t,l,r,e.callable,e.delegationSelector)}}}else{if(!Object.keys(c).length)return;ue(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=$t();let s=null,o=!0,r=!0,a=!1;e!==de(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());let l=new Event(e,{bubbles:o,cancelable:!0});return l=pe(l,i),a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function pe(t,e){for(const[i,n]of Object.entries(e||{}))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}const ge=new Map,me={set(t,e,i){ge.has(t)||ge.set(t,new Map);const n=ge.get(t);n.has(e)||0===n.size?n.set(e,i):console.error(`Bootstrap doesn't allow more than one instance per element. Bound instance: ${Array.from(n.keys())[0]}.`)},get:(t,e)=>ge.has(t)&&ge.get(t).get(e)||null,remove(t,e){if(!ge.has(t))return;const i=ge.get(t);i.delete(e),0===i.size&&ge.delete(t)}};function _e(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function be(t){return t.replace(/[A-Z]/g,t=>"-"+t.toLowerCase())}const ve={setDataAttribute(t,e,i){t.setAttribute("data-bs-"+be(e),i)},removeDataAttribute(t,e){t.removeAttribute("data-bs-"+be(e))},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter(t=>t.startsWith("bs")&&!t.startsWith("bsConfig"));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=_e(t.dataset[n])}return e},getDataAttribute:(t,e)=>_e(t.getAttribute("data-bs-"+be(e)))};class ye{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=Wt(e)?ve.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...Wt(e)?ve.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const n of Object.keys(e)){const s=e[n],o=t[n],r=Wt(o)?"element":null==(i=o)?""+i:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(r))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${r}" but expected type 
"${s}".`)}var i}}class we extends ye{constructor(t,e){super(),(t=Ft(t))&&(this._element=t,this._config=this._getConfig(e),me.set(this._element,this.constructor.DATA_KEY,this))}dispose(){me.remove(this._element,this.constructor.DATA_KEY),fe.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){Ut(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return me.get(Ft(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.2.2"}static get DATA_KEY(){return"bs."+this.NAME}static get EVENT_KEY(){return"."+this.DATA_KEY}static eventName(t){return`${t}${this.EVENT_KEY}`}}const Ae=(t,e="hide")=>{const i="click.dismiss"+t.EVENT_KEY,n=t.NAME;fe.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),zt(this))return;const s=Mt(this)||this.closest("."+n);t.getOrCreateInstance(s)[e]()}))};class Ee extends we{static get NAME(){return"alert"}close(){if(fe.trigger(this._element,"close.bs.alert").defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback(()=>this._destroyElement(),this._element,t)}_destroyElement(){this._element.remove(),fe.trigger(this._element,"closed.bs.alert"),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Ee.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}Ae(Ee,"close"),Xt(Ee);class Ce extends we{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=Ce.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}fe.on(document,"click.bs.button.data-api",'[data-bs-toggle="button"]',t=>{t.preventDefault();const e=t.target.closest('[data-bs-toggle="button"]');Ce.getOrCreateInstance(e).toggle()}),Xt(Ce);const Te={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter(t=>t.matches(e)),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map(t=>t+':not([tabindex^="-"])').join(",");return this.find(e,t).filter(t=>!zt(t)&&Bt(t))}},Oe={endCallback:null,leftCallback:null,rightCallback:null},xe={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class ke extends ye{constructor(t,e){super(),this._element=t,t&&ke.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return Oe}static get DefaultType(){return xe}static get 
NAME(){return"swipe"}dispose(){fe.off(this._element,".bs.swipe")}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),Yt(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&Yt(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(fe.on(this._element,"pointerdown.bs.swipe",t=>this._start(t)),fe.on(this._element,"pointerup.bs.swipe",t=>this._end(t)),this._element.classList.add("pointer-event")):(fe.on(this._element,"touchstart.bs.swipe",t=>this._start(t)),fe.on(this._element,"touchmove.bs.swipe",t=>this._move(t)),fe.on(this._element,"touchend.bs.swipe",t=>this._end(t)))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const Le={ArrowLeft:"right",ArrowRight:"left"},De={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},Se={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class Ie extends we{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=Te.findOne(".carousel-indicators",this._element),this._addEventListeners(),"carousel"===this._config.ride&&this.cycle()}static get Default(){return De}static get DefaultType(){return Se}static get NAME(){return"carousel"}next(){this._slide("next")}nextWhenVisible(){!document.hidden&&Bt(this._element)&&this.next()}prev(){this._slide("prev")}pause(){this._isSliding&&Ht(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval(()=>this.nextWhenVisible(),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?fe.one(this._element,"slid.bs.carousel",()=>this.cycle()):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void fe.one(this._element,"slid.bs.carousel",()=>this.to(t));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?"next":"prev";this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&fe.on(this._element,"keydown.bs.carousel",t=>this._keydown(t)),"hover"===this._config.pause&&(fe.on(this._element,"mouseenter.bs.carousel",()=>this.pause()),fe.on(this._element,"mouseleave.bs.carousel",()=>this._maybeEnableCycle())),this._config.touch&&ke.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of Te.find(".carousel-item img",this._element))fe.on(t,"dragstart.bs.carousel",t=>t.preventDefault());const t={leftCallback:()=>this._slide(this._directionToOrder("left")),rightCallback:()=>this._slide(this._directionToOrder("right")),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout(()=>this._maybeEnableCycle(),500+this._config.interval))}};this._swipeHelper=new 
ke(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=Le[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=Te.findOne(".active",this._indicatorsElement);e.classList.remove("active"),e.removeAttribute("aria-current");const i=Te.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add("active"),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n="next"===t,s=e||Gt(this._getItems(),i,n,this._config.wrap);if(s===i)return;const o=this._getItemIndex(s),r=e=>fe.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r("slide.bs.carousel").defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),Vt(s),i.classList.add(l),s.classList.add(l);this._queueCallback(()=>{s.classList.remove(l,c),s.classList.add("active"),i.classList.remove("active",c,l),this._isSliding=!1,r("slid.bs.carousel")},i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return Te.findOne(".active.carousel-item",this._element)}_getItems(){return Te.find(".carousel-item",this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return Qt()?"left"===t?"prev":"next":"left"===t?"next":"prev"}_orderToDirection(t){return Qt()?"prev"===t?"left":"right":"prev"===t?"right":"left"}static jQueryInterface(t){return this.each((function(){const e=Ie.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}fe.on(document,"click.bs.carousel.data-api","[data-bs-slide], [data-bs-slide-to]",(function(t){const e=Mt(this);if(!e||!e.classList.contains("carousel"))return;t.preventDefault();const i=Ie.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===ve.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),fe.on(window,"load.bs.carousel.data-api",()=>{const t=Te.find('[data-bs-ride="carousel"]');for(const e of t)Ie.getOrCreateInstance(e)}),Xt(Ie);const Ne={parent:null,toggle:!0},Pe={parent:"(null|element)",toggle:"boolean"};class je extends we{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=Te.find('[data-bs-toggle="collapse"]');for(const t of i){const e=jt(t),i=Te.find(e).filter(t=>t===this._element);null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return Ne}static get DefaultType(){return Pe}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let 
t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter(t=>t!==this._element).map(t=>je.getOrCreateInstance(t,{toggle:!1}))),t.length&&t[0]._isTransitioning)return;if(fe.trigger(this._element,"show.bs.collapse").defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove("collapse"),this._element.classList.add("collapsing"),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i="scroll"+(e[0].toUpperCase()+e.slice(1));this._queueCallback(()=>{this._isTransitioning=!1,this._element.classList.remove("collapsing"),this._element.classList.add("collapse","show"),this._element.style[e]="",fe.trigger(this._element,"shown.bs.collapse")},this._element,!0),this._element.style[e]=this._element[i]+"px"}hide(){if(this._isTransitioning||!this._isShown())return;if(fe.trigger(this._element,"hide.bs.collapse").defaultPrevented)return;const t=this._getDimension();this._element.style[t]=this._element.getBoundingClientRect()[t]+"px",Vt(this._element),this._element.classList.add("collapsing"),this._element.classList.remove("collapse","show");for(const t of this._triggerArray){const e=Mt(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0;this._element.style[t]="",this._queueCallback(()=>{this._isTransitioning=!1,this._element.classList.remove("collapsing"),this._element.classList.add("collapse"),fe.trigger(this._element,"hidden.bs.collapse")},this._element,!0)}_isShown(t=this._element){return t.classList.contains("show")}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=Ft(t.parent),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren('[data-bs-toggle="collapse"]');for(const e of t){const t=Mt(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=Te.find(":scope .collapse .collapse",this._config.parent);return Te.find(t,this._config.parent).filter(t=>!e.includes(t))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=je.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}fe.on(document,"click.bs.collapse.data-api",'[data-bs-toggle="collapse"]',(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();const e=jt(this),i=Te.find(e);for(const t of i)je.getOrCreateInstance(t,{toggle:!1}).toggle()})),Xt(je);const Me="ArrowUp",He="ArrowDown",We='[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled)',Fe=Qt()?"top-end":"top-start",Be=Qt()?"top-start":"top-end",ze=Qt()?"bottom-end":"bottom-start",qe=Qt()?"bottom-start":"bottom-end",Re=Qt()?"left-start":"right-start",Ve=Qt()?"right-start":"left-start",$e={autoClose:!0,boundary:"clippingParents",display:"dynamic",offset:[0,2],popperConfig:null,reference:"toggle"},Ke={autoClose:"(boolean|string)",boundary:"(string|element)",display:"string",offset:"(array|string|function)",popperConfig:"(null|object|function)",reference:"(string|element|object)"};class Qe extends 
we{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=Te.next(this._element,".dropdown-menu")[0]||Te.prev(this._element,".dropdown-menu")[0]||Te.findOne(".dropdown-menu",this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return $e}static get DefaultType(){return Ke}static get NAME(){return"dropdown"}toggle(){return this._isShown()?this.hide():this.show()}show(){if(zt(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!fe.trigger(this._element,"show.bs.dropdown",t).defaultPrevented){if(this._createPopper(),"ontouchstart"in document.documentElement&&!this._parent.closest(".navbar-nav"))for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add("show"),this._element.classList.add("show"),fe.trigger(this._element,"shown.bs.dropdown",t)}}hide(){if(zt(this._element)||!this._isShown())return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!fe.trigger(this._element,"hide.bs.dropdown",t).defaultPrevented){if("ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._popper&&this._popper.destroy(),this._menu.classList.remove("show"),this._element.classList.remove("show"),this._element.setAttribute("aria-expanded","false"),ve.removeDataAttribute(this._menu,"popper"),fe.trigger(this._element,"hidden.bs.dropdown",t)}}_getConfig(t){if("object"==typeof(t=super._getConfig(t)).reference&&!Wt(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError("dropdown".toUpperCase()+': Option "reference" provided type "object" without a required "getBoundingClientRect" method.');return t}_createPopper(){if(void 0===n)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let t=this._element;"parent"===this._config.reference?t=this._parent:Wt(this._config.reference)?t=Ft(this._config.reference):"object"==typeof this._config.reference&&(t=this._config.reference);const e=this._getPopperConfig();this._popper=It(t,this._menu,e)}_isShown(){return this._menu.classList.contains("show")}_getPlacement(){const t=this._parent;if(t.classList.contains("dropend"))return Re;if(t.classList.contains("dropstart"))return Ve;if(t.classList.contains("dropup-center"))return"top";if(t.classList.contains("dropdown-center"))return"bottom";const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?Be:Fe:e?qe:ze}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map(t=>Number.parseInt(t,10)):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(ve.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,..."function"==typeof this._config.popperConfig?this._config.popperConfig(t):this._config.popperConfig}}_selectMenuItem({key:t,target:e}){const i=Te.find(".dropdown-menu 
.dropdown-item:not(.disabled):not(:disabled)",this._menu).filter(t=>Bt(t));i.length&&Gt(i,e,t===He,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=Qe.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=Te.find('[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled).show');for(const i of e){const e=Qe.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Me,He].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(We)?this:Te.prev(this,We)[0]||Te.next(this,We)[0]||Te.findOne(We,t.delegateTarget.parentNode),o=Qe.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}fe.on(document,"keydown.bs.dropdown.data-api",We,Qe.dataApiKeydownHandler),fe.on(document,"keydown.bs.dropdown.data-api",".dropdown-menu",Qe.dataApiKeydownHandler),fe.on(document,"click.bs.dropdown.data-api",Qe.clearMenus),fe.on(document,"keyup.bs.dropdown.data-api",Qe.clearMenus),fe.on(document,"click.bs.dropdown.data-api",We,(function(t){t.preventDefault(),Qe.getOrCreateInstance(this).toggle()})),Xt(Qe);class Xe{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,"padding-right",e=>e+t),this._setElementAttributes(".fixed-top, .fixed-bottom, .is-fixed, .sticky-top","padding-right",e=>e+t),this._setElementAttributes(".sticky-top","margin-right",e=>e-t)}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,"padding-right"),this._resetElementAttributes(".fixed-top, .fixed-bottom, .is-fixed, .sticky-top","padding-right"),this._resetElementAttributes(".sticky-top","margin-right")}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,i(Number.parseFloat(s))+"px")})}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&ve.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,t=>{const i=ve.getDataAttribute(t,e);null!==i?(ve.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)})}_applyManipulationCallback(t,e){if(Wt(t))e(t);else for(const i of Te.find(t,this._element))e(i)}}const 
Ye={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},Ue={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class Ge extends ye{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return Ye}static get DefaultType(){return Ue}static get NAME(){return"backdrop"}show(t){if(!this._config.isVisible)return void Yt(t);this._append();const e=this._getElement();this._config.isAnimated&&Vt(e),e.classList.add("show"),this._emulateAnimation(()=>{Yt(t)})}hide(t){this._config.isVisible?(this._getElement().classList.remove("show"),this._emulateAnimation(()=>{this.dispose(),Yt(t)})):Yt(t)}dispose(){this._isAppended&&(fe.off(this._element,"mousedown.bs.backdrop"),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=Ft(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),fe.on(t,"mousedown.bs.backdrop",()=>{Yt(this._config.clickCallback)}),this._isAppended=!0}_emulateAnimation(t){Ut(t,this._getElement(),this._config.isAnimated)}}const Je={autofocus:!0,trapElement:null},Ze={autofocus:"boolean",trapElement:"element"};class ti extends ye{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return Je}static get DefaultType(){return Ze}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),fe.off(document,".bs.focustrap"),fe.on(document,"focusin.bs.focustrap",t=>this._handleFocusin(t)),fe.on(document,"keydown.tab.bs.focustrap",t=>this._handleKeydown(t)),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,fe.off(document,".bs.focustrap"))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=Te.focusableChildren(e);0===i.length?e.focus():"backward"===this._lastTabNavDirection?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?"backward":"forward")}}const ei={backdrop:!0,focus:!0,keyboard:!0},ii={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class ni extends we{constructor(t,e){super(t,e),this._dialog=Te.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new Xe,this._addEventListeners()}static get Default(){return ei}static get DefaultType(){return ii}static get NAME(){return"modal"}toggle(t){return 
this._isShown?this.hide():this.show(t)}show(t){if(this._isShown||this._isTransitioning)return;fe.trigger(this._element,"show.bs.modal",{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add("modal-open"),this._adjustDialog(),this._backdrop.show(()=>this._showElement(t)))}hide(){if(!this._isShown||this._isTransitioning)return;fe.trigger(this._element,"hide.bs.modal").defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove("show"),this._queueCallback(()=>this._hideModal(),this._element,this._isAnimated()))}dispose(){for(const t of[window,this._dialog])fe.off(t,".bs.modal");this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Ge({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new ti({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const e=Te.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),Vt(this._element),this._element.classList.add("show");this._queueCallback(()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,fe.trigger(this._element,"shown.bs.modal",{relatedTarget:t})},this._dialog,this._isAnimated())}_addEventListeners(){fe.on(this._element,"keydown.dismiss.bs.modal",t=>{if("Escape"===t.key)return this._config.keyboard?(t.preventDefault(),void this.hide()):void this._triggerBackdropTransition()}),fe.on(window,"resize.bs.modal",()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()}),fe.on(this._element,"mousedown.dismiss.bs.modal",t=>{fe.one(this._element,"click.dismiss.bs.modal",e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())})})}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide(()=>{document.body.classList.remove("modal-open"),this._resetAdjustments(),this._scrollBar.reset(),fe.trigger(this._element,"hidden.bs.modal")})}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(fe.trigger(this._element,"hidePrevented.bs.modal").defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains("modal-static")||(t||(this._element.style.overflowY="hidden"),this._element.classList.add("modal-static"),this._queueCallback(()=>{this._element.classList.remove("modal-static"),this._queueCallback(()=>{this._element.style.overflowY=e},this._dialog)},this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=Qt()?"paddingLeft":"paddingRight";this._element.style[t]=e+"px"}if(!i&&t){const t=Qt()?"paddingRight":"paddingLeft";this._element.style[t]=e+"px"}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return 
this.each((function(){const i=ni.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}fe.on(document,"click.bs.modal.data-api",'[data-bs-toggle="modal"]',(function(t){const e=Mt(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),fe.one(e,"show.bs.modal",t=>{t.defaultPrevented||fe.one(e,"hidden.bs.modal",()=>{Bt(this)&&this.focus()})});const i=Te.findOne(".modal.show");i&&ni.getInstance(i).hide();ni.getOrCreateInstance(e).toggle(this)})),Ae(ni),Xt(ni);const si=".bs.offcanvas",oi=`load${si}.data-api`,ri=`click${si}.data-api`,ai={backdrop:!0,keyboard:!0,scroll:!1},li={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class ci extends we{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return ai}static get DefaultType(){return li}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){if(this._isShown)return;if(fe.trigger(this._element,"show.bs.offcanvas",{relatedTarget:t}).defaultPrevented)return;this._isShown=!0,this._backdrop.show(),this._config.scroll||(new Xe).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add("showing");this._queueCallback(()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add("show"),this._element.classList.remove("showing"),fe.trigger(this._element,"shown.bs.offcanvas",{relatedTarget:t})},this._element,!0)}hide(){if(!this._isShown)return;if(fe.trigger(this._element,"hide.bs.offcanvas").defaultPrevented)return;this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add("hiding"),this._backdrop.hide();this._queueCallback(()=>{this._element.classList.remove("show","hiding"),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new Xe).reset(),fe.trigger(this._element,"hidden.bs.offcanvas")},this._element,!0)}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new Ge({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():fe.trigger(this._element,"hidePrevented.bs.offcanvas")}:null})}_initializeFocusTrap(){return new ti({trapElement:this._element})}_addEventListeners(){fe.on(this._element,"keydown.dismiss.bs.offcanvas",t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():fe.trigger(this._element,"hidePrevented.bs.offcanvas"))})}static jQueryInterface(t){return this.each((function(){const e=ci.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}fe.on(document,ri,'[data-bs-toggle="offcanvas"]',(function(t){const e=Mt(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),zt(this))return;fe.one(e,"hidden.bs.offcanvas",()=>{Bt(this)&&this.focus()});const i=Te.findOne(".offcanvas.show");i&&i!==e&&ci.getInstance(i).hide();ci.getOrCreateInstance(e).toggle(this)})),fe.on(window,oi,()=>{for(const t of Te.find(".offcanvas.show"))ci.getOrCreateInstance(t).show()}),fe.on(window,"resize.bs.offcanvas",()=>{for(const t of 
Te.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&ci.getOrCreateInstance(t).hide()}),Ae(ci),Xt(ci);const ui=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),hi=/^(?:(?:https?|mailto|ftp|tel|file|sms):|[^#&/:?]*(?:[#/?]|$))/i,di=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[\d+/a-z]+=*$/i,fi=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!ui.has(i)||Boolean(hi.test(t.nodeValue)||di.test(t.nodeValue)):e.filter(t=>t instanceof RegExp).some(t=>t.test(i))},pi={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]};const gi={allowList:pi,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"<div></div>"},mi={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},_i={entry:"(string|element|function|null)",selector:"(string|element)"};class bi extends ye{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return gi}static get DefaultType(){return mi}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map(t=>this._resolvePossibleFunction(t)).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},_i)}_setContent(t,e,i){const n=Te.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?Wt(e)?this._putElementInTemplate(Ft(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)fi(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return"function"==typeof t?t(this):t}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const vi=new Set(["sanitize","allowList","sanitizeFn"]),yi={AUTO:"auto",TOP:"top",RIGHT:Qt()?"left":"right",BOTTOM:"bottom",LEFT:Qt()?"right":"left"},wi={allowList:pi,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,0],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',title:"",trigger:"hover focus"},Ai={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class Ei extends we{constructor(t,e){if(void 0===n)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,e),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return wi}static get DefaultType(){return Ai}static get
NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),fe.off(this._element.closest(".modal"),"hide.bs.modal",this._hideModalHandler),this.tip&&this.tip.remove(),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=fe.trigger(this._element,this.constructor.eventName("show")),e=(qt(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this.tip&&(this.tip.remove(),this.tip=null);const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),fe.trigger(this._element,this.constructor.eventName("inserted"))),this._popper?this._popper.update():this._popper=this._createPopper(i),i.classList.add("show"),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._queueCallback(()=>{fe.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1},this.tip,this._isAnimated())}hide(){if(!this._isShown())return;if(fe.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented)return;const t=this._getTipElement();if(t.classList.remove("show"),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._activeTrigger.click=!1,this._activeTrigger.focus=!1,this._activeTrigger.hover=!1,this._isHovered=null;this._queueCallback(()=>{this._isWithActiveTrigger()||(this._isHovered||t.remove(),this._element.removeAttribute("aria-describedby"),fe.trigger(this._element,this.constructor.eventName("hidden")),this._disposePopper())},this.tip,this._isAnimated())}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove("fade","show"),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add("fade"),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new bi({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{".tooltip-inner":this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return 
this._config.animation||this.tip&&this.tip.classList.contains("fade")}_isShown(){return this.tip&&this.tip.classList.contains("show")}_createPopper(t){const e="function"==typeof this._config.placement?this._config.placement.call(this,t,this._element):this._config.placement,i=yi[e.toUpperCase()];return It(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map(t=>Number.parseInt(t,10)):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return"function"==typeof t?t.call(this._element):t}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,..."function"==typeof this._config.popperConfig?this._config.popperConfig(e):this._config.popperConfig}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)fe.on(this._element,this.constructor.eventName("click"),this._config.selector,t=>{this._initializeOnDelegatedTarget(t).toggle()});else if("manual"!==e){const t="hover"===e?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i="hover"===e?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");fe.on(this._element,t,this._config.selector,t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?"focus":"hover"]=!0,e._enter()}),fe.on(this._element,i,this._config.selector,t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?"focus":"hover"]=e._element.contains(t.relatedTarget),e._leave()})}this._hideModalHandler=()=>{this._element&&this.hide()},fe.on(this._element.closest(".modal"),"hide.bs.modal",this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout(()=>{this._isHovered&&this.show()},this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout(()=>{this._isHovered||this.hide()},this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=ve.getDataAttributes(this._element);for(const t of Object.keys(e))vi.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:Ft(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const e in this._config)this.constructor.Default[e]!==this._config[e]&&(t[e]=this._config[e]);return 
t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null)}static jQueryInterface(t){return this.each((function(){const e=Ei.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Xt(Ei);const Ci={...Ei.Default,content:"",offset:[0,8],placement:"right",template:'<div class="popover" role="tooltip"><div class="popover-arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>',trigger:"click"},Ti={...Ei.DefaultType,content:"(null|string|element|function)"};class Oi extends Ei{static get Default(){return Ci}static get DefaultType(){return Ti}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{".popover-header":this._getTitle(),".popover-body":this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=Oi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Xt(Oi);const xi={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},ki={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class Li extends we{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return xi}static get DefaultType(){return ki}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=Ft(t.target)||document.body,t.rootMargin=t.offset?t.offset+"px 0px -30%":t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map(t=>Number.parseFloat(t))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(fe.off(this._config.target,"click.bs.scrollspy"),fe.on(this._config.target,"click.bs.scrollspy","[href]",t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}}))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver(t=>this._observerCallback(t),t)}_observerCallback(t){const e=t=>this._targetLinks.get("#"+t.target.id),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=Te.find("[href]",this._config.target);for(const e of t){if(!e.hash||zt(e))continue;const
t=Te.findOne(e.hash,this._element);Bt(t)&&(this._targetLinks.set(e.hash,e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add("active"),this._activateParents(t),fe.trigger(this._element,"activate.bs.scrollspy",{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))Te.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add("active");else for(const e of Te.parents(t,".nav, .list-group"))for(const t of Te.prev(e,".nav-link, .nav-item > .nav-link, .list-group-item"))t.classList.add("active")}_clearActiveClass(t){t.classList.remove("active");const e=Te.find("[href].active",t);for(const t of e)t.classList.remove("active")}static jQueryInterface(t){return this.each((function(){const e=Li.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(window,"load.bs.scrollspy.data-api",()=>{for(const t of Te.find('[data-bs-spy="scroll"]'))Li.getOrCreateInstance(t)}),Xt(Li);const Di="ArrowLeft",Si="ArrowRight",Ii="ArrowUp",Ni="ArrowDown",Pi='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',ji='.nav-link:not(.dropdown-toggle), .list-group-item:not(.dropdown-toggle), [role="tab"]:not(.dropdown-toggle), '+Pi;class Mi extends we{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),fe.on(this._element,"keydown.bs.tab",t=>this._keydown(t)))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?fe.trigger(e,"hide.bs.tab",{relatedTarget:t}):null;fe.trigger(t,"show.bs.tab",{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){if(!t)return;t.classList.add("active"),this._activate(Mt(t));this._queueCallback(()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),fe.trigger(t,"shown.bs.tab",{relatedTarget:e})):t.classList.add("show")},t,t.classList.contains("fade"))}_deactivate(t,e){if(!t)return;t.classList.remove("active"),t.blur(),this._deactivate(Mt(t));this._queueCallback(()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),fe.trigger(t,"hidden.bs.tab",{relatedTarget:e})):t.classList.remove("show")},t,t.classList.contains("fade"))}_keydown(t){if(![Di,Si,Ii,Ni].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=[Si,Ni].includes(t.key),i=Gt(this._getChildren().filter(t=>!zt(t)),t.target,e,!0);i&&(i.focus({preventScroll:!0}),Mi.getOrCreateInstance(i).show())}_getChildren(){return Te.find(ji,this._parent)}_getActiveElem(){return this._getChildren().find(t=>this._elemIsActive(t))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const 
e=Mt(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby","#"+t.id))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=Te.findOne(t,i);s&&s.classList.toggle(n,e)};n(".dropdown-toggle","active"),n(".dropdown-menu","show"),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains("active")}_getInnerElement(t){return t.matches(ji)?t:Te.findOne(ji,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Mi.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(document,"click.bs.tab",Pi,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),zt(this)||Mi.getOrCreateInstance(this).show()})),fe.on(window,"load.bs.tab",()=>{for(const t of Te.find('.active[data-bs-toggle="tab"], .active[data-bs-toggle="pill"], .active[data-bs-toggle="list"]'))Mi.getOrCreateInstance(t)}),Xt(Mi);const Hi={animation:"boolean",autohide:"boolean",delay:"number"},Wi={animation:!0,autohide:!0,delay:5e3};class Fi extends we{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return Wi}static get DefaultType(){return Hi}static get NAME(){return"toast"}show(){if(fe.trigger(this._element,"show.bs.toast").defaultPrevented)return;this._clearTimeout(),this._config.animation&&this._element.classList.add("fade");this._element.classList.remove("hide"),Vt(this._element),this._element.classList.add("show","showing"),this._queueCallback(()=>{this._element.classList.remove("showing"),fe.trigger(this._element,"shown.bs.toast"),this._maybeScheduleHide()},this._element,this._config.animation)}hide(){if(!this.isShown())return;if(fe.trigger(this._element,"hide.bs.toast").defaultPrevented)return;this._element.classList.add("showing"),this._queueCallback(()=>{this._element.classList.add("hide"),this._element.classList.remove("showing","show"),fe.trigger(this._element,"hidden.bs.toast")},this._element,this._config.animation)}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove("show"),super.dispose()}isShown(){return this._element.classList.contains("show")}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout(()=>{this.hide()},this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){fe.on(this._element,"mouseover.bs.toast",t=>this._onInteraction(t,!0)),fe.on(this._element,"mouseout.bs.toast",t=>this._onInteraction(t,!1)),fe.on(this._element,"focusin.bs.toast",t=>this._onInteraction(t,!0)),fe.on(this._element,"focusout.bs.toast",t=>this._onInteraction(t,!1))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return this.each((function(){const e=Fi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}Ae(Fi),Xt(Fi);var 
Bi=i(0);i.p;Object(Bi.a)((function(){[].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')).map((function(t){return new Ei(t,{delay:{show:500,hide:100}})}))}))}]); \ No newline at end of file diff --git a/_static/scripts/pydata-sphinx-theme.js b/_static/scripts/pydata-sphinx-theme.js deleted file mode 100644 index 79e86a9cd..000000000 --- a/_static/scripts/pydata-sphinx-theme.js +++ /dev/null @@ -1 +0,0 @@ -!function(e){var t={};function o(n){if(t[n])return t[n].exports;var r=t[n]={i:n,l:!1,exports:{}};return e[n].call(r.exports,r,r.exports,o),r.l=!0,r.exports}o.m=e,o.c=t,o.d=function(e,t,n){o.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},o.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},o.t=function(e,t){if(1&t&&(e=o(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(o.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var r in e)o.d(n,r,function(t){return e[t]}.bind(null,r));return n},o.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return o.d(t,"a",t),t},o.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},o.p="",o(o.s=2)}([function(e,t,o){"use strict";function n(e){"loading"!=document.readyState?e():document.addEventListener("DOMContentLoaded",e)}o.d(t,"a",(function(){return n}))},,function(e,t,o){"use strict";o.r(t);var n=o(0),r=(o.p,window.matchMedia("(prefers-color-scheme: dark)"));function c(e){document.documentElement.dataset.theme=r.matches?"dark":"light"}function a(e){"light"!==e&&"dark"!==e&&"auto"!==e&&(console.error(`Got invalid theme mode: ${e}. 
Resetting to auto.`),e="auto");var t=r.matches?"dark":"light";document.documentElement.dataset.mode=e;var o="auto"==e?t:e;document.documentElement.dataset.theme=o,localStorage.setItem("mode",e),localStorage.setItem("theme",o),console.log(`[PST]: Changed to ${e} mode using the ${o} theme.`),r.onchange="auto"==e?c:""}function d(){const e=document.documentElement.dataset.defaultMode||"auto",t=localStorage.getItem("mode")||e;var o,n,c=r.matches?["auto","light","dark"]:["auto","dark","light"];a(((n=(o=c).indexOf(t)+1)===o.length&&(n=0),o[n]))}var l=()=>{let e=document.querySelectorAll("form.bd-search");return e.length?(1==e.length?e[0]:document.querySelector("div:not(.search-button__search-container) > form.bd-search")).querySelector("input"):void 0},i=()=>{let e=l(),t=document.querySelector(".search-button__wrapper");e===t.querySelector("input")&&t.classList.toggle("show"),document.activeElement===e?e.blur():(e.focus(),e.select(),e.scrollIntoView({block:"center"}))};function s(e){const t=DOCUMENTATION_OPTIONS.pagename+".html",o=e.target.getAttribute("href");let n=o.replace(t,"");return fetch(o,{method:"HEAD"}).then(()=>{location.href=o}).catch(e=>{location.href=n}),!1}var u=document.querySelectorAll(".version-switcher__button");u.length&&fetch(DOCUMENTATION_OPTIONS.theme_switcher_json_url).then(e=>e.json()).then(e=>{const t=DOCUMENTATION_OPTIONS.pagename+".html";u.forEach(e=>{e.dataset.activeVersionName="",e.dataset.activeVersion=""}),e.forEach(e=>{"name"in e||(e.name=e.version);const o=document.createElement("span");o.textContent=""+e.name;const n=document.createElement("a");n.setAttribute("class","list-group-item list-group-item-action py-1"),n.setAttribute("href",`${e.url}${t}`),n.appendChild(o),n.onclick=s,n.dataset.versionName=e.name,n.dataset.version=e.version,document.querySelector(".version-switcher__menu").append(n),"DOCUMENTATION_OPTIONS.version_switcher_version_match"==e.version&&(n.classList.add("active"),u.forEach(t=>{t.innerText=t.dataset.activeVersionName=e.name,t.dataset.activeVersion=e.version}))})}),Object(n.a)((function(){a(document.documentElement.dataset.mode),document.querySelectorAll(".theme-switch-button").forEach(e=>{e.addEventListener("click",d)})})),Object(n.a)((function(){if(!document.querySelector(".bd-docs-nav"))return;var e=document.querySelector("div.bd-sidebar");let t=parseInt(sessionStorage.getItem("sidebar-scroll-top"),10);if(isNaN(t)){var o=document.querySelector(".bd-docs-nav").querySelectorAll(".active");if(o.length>0){var n=o[o.length-1],r=n.getBoundingClientRect().y-e.getBoundingClientRect().y;if(n.getBoundingClientRect().y>.5*window.innerHeight){let t=.25;e.scrollTop=r-e.clientHeight*t,console.log("[PST]: Scrolled sidebar using last active link...")}}}else e.scrollTop=t,console.log("[PST]: Scrolled sidebar using stored browser position...");window.addEventListener("beforeunload",()=>{sessionStorage.setItem("sidebar-scroll-top",e.scrollTop)})})),Object(n.a)((function(){window.addEventListener("activate.bs.scrollspy",(function(){document.querySelectorAll(".bd-toc-nav a").forEach(e=>{e.parentElement.classList.remove("active")});document.querySelectorAll(".bd-toc-nav a.active").forEach(e=>{e.parentElement.classList.add("active")})}))})),Object(n.a)(()=>{(()=>{let e=document.querySelectorAll("form.bd-search");window.navigator.platform.toUpperCase().indexOf("MAC")>=0&&e.forEach(e=>e.querySelector("kbd.kbd-shortcut__modifier").innerText="⌘")})(),window.addEventListener("keydown",e=>{let 
t=l();(e.ctrlKey||e.metaKey)&&"KeyK"==e.code?(e.preventDefault(),i()):document.activeElement===t&&"Escape"==e.code&&i()},!0),document.querySelectorAll(".search-button__button").forEach(e=>{e.onclick=i});let e=document.querySelector(".search-button__overlay");e&&(e.onclick=i)}),Object(n.a)((function(){new MutationObserver((e,t)=>{e.forEach(e=>{0!==e.addedNodes.length&&void 0!==e.addedNodes[0].data&&-1!=e.addedNodes[0].data.search("Inserted RTD Footer")&&e.addedNodes.forEach(e=>{document.getElementById("rtd-footer-container").append(e)})})}).observe(document.body,{childList:!0})}))}]); \ No newline at end of file diff --git a/_static/searchtools.js b/_static/searchtools.js deleted file mode 100644 index 97d56a74d..000000000 --- a/_static/searchtools.js +++ /dev/null @@ -1,566 +0,0 @@ -/* - * searchtools.js - * ~~~~~~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for the full-text search. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ -"use strict"; - -/** - * Simple result scoring code. - */ -if (typeof Scorer === "undefined") { - var Scorer = { - // Implement the following function to further tweak the score for each result - // The function takes a result array [docname, title, anchor, descr, score, filename] - // and returns the new score. - /* - score: result => { - const [docname, title, anchor, descr, score, filename] = result - return score - }, - */ - - // query matches the full name of an object - objNameMatch: 11, - // or matches in the last dotted part of the object name - objPartialMatch: 6, - // Additive scores depending on the priority of the object - objPrio: { - 0: 15, // used to be importantResults - 1: 5, // used to be objectResults - 2: -5, // used to be unimportantResults - }, - // Used when the priority is not in the mapping. 
- objPrioDefault: 0, - - // query found in title - title: 15, - partialTitle: 7, - // query found in terms - term: 5, - partialTerm: 2, - }; -} - -const _removeChildren = (element) => { - while (element && element.lastChild) element.removeChild(element.lastChild); -}; - -/** - * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping - */ -const _escapeRegExp = (string) => - string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string - -const _displayItem = (item, searchTerms) => { - const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; - const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT; - const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; - const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; - const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; - - const [docName, title, anchor, descr, score, _filename] = item; - - let listItem = document.createElement("li"); - let requestUrl; - let linkUrl; - if (docBuilder === "dirhtml") { - // dirhtml builder - let dirname = docName + "/"; - if (dirname.match(/\/index\/$/)) - dirname = dirname.substring(0, dirname.length - 6); - else if (dirname === "index/") dirname = ""; - requestUrl = docUrlRoot + dirname; - linkUrl = requestUrl; - } else { - // normal html builders - requestUrl = docUrlRoot + docName + docFileSuffix; - linkUrl = docName + docLinkSuffix; - } - let linkEl = listItem.appendChild(document.createElement("a")); - linkEl.href = linkUrl + anchor; - linkEl.dataset.score = score; - linkEl.innerHTML = title; - if (descr) - listItem.appendChild(document.createElement("span")).innerHTML = - " (" + descr + ")"; - else if (showSearchSummary) - fetch(requestUrl) - .then((responseData) => responseData.text()) - .then((data) => { - if (data) - listItem.appendChild( - Search.makeSearchSummary(data, searchTerms) - ); - }); - Search.output.appendChild(listItem); -}; -const _finishSearch = (resultCount) => { - Search.stopPulse(); - Search.title.innerText = _("Search Results"); - if (!resultCount) - Search.status.innerText = Documentation.gettext( - "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." - ); - else - Search.status.innerText = _( - `Search finished, found ${resultCount} page(s) matching the search query.` - ); -}; -const _displayNextItem = ( - results, - resultCount, - searchTerms -) => { - // results left, load the summary and display it - // this is intended to be dynamic (don't sub resultsCount) - if (results.length) { - _displayItem(results.pop(), searchTerms); - setTimeout( - () => _displayNextItem(results, resultCount, searchTerms), - 5 - ); - } - // search finished, update title and status message - else _finishSearch(resultCount); -}; - -/** - * Default splitQuery function. Can be overridden in ``sphinx.search`` with a - * custom function per language. - * - * The regular expression works by splitting the string on consecutive characters - * that are not Unicode letters, numbers, underscores, or emoji characters. - * This is the same as ``\W+`` in Python, preserving the surrogate pair area. 
- */ -if (typeof splitQuery === "undefined") { - var splitQuery = (query) => query - .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) - .filter(term => term) // remove remaining empty strings -} - -/** - * Search Module - */ -const Search = { - _index: null, - _queued_query: null, - _pulse_status: -1, - - htmlToText: (htmlString) => { - const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); - htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); - const docContent = htmlElement.querySelector('[role="main"]'); - if (docContent !== undefined) return docContent.textContent; - console.warn( - "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." - ); - return ""; - }, - - init: () => { - const query = new URLSearchParams(window.location.search).get("q"); - document - .querySelectorAll('input[name="q"]') - .forEach((el) => (el.value = query)); - if (query) Search.performSearch(query); - }, - - loadIndex: (url) => - (document.body.appendChild(document.createElement("script")).src = url), - - setIndex: (index) => { - Search._index = index; - if (Search._queued_query !== null) { - const query = Search._queued_query; - Search._queued_query = null; - Search.query(query); - } - }, - - hasIndex: () => Search._index !== null, - - deferQuery: (query) => (Search._queued_query = query), - - stopPulse: () => (Search._pulse_status = -1), - - startPulse: () => { - if (Search._pulse_status >= 0) return; - - const pulse = () => { - Search._pulse_status = (Search._pulse_status + 1) % 4; - Search.dots.innerText = ".".repeat(Search._pulse_status); - if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); - }; - pulse(); - }, - - /** - * perform a search for something (or wait until index is loaded) - */ - performSearch: (query) => { - // create the required interface elements - const searchText = document.createElement("h2"); - searchText.textContent = _("Searching"); - const searchSummary = document.createElement("p"); - searchSummary.classList.add("search-summary"); - searchSummary.innerText = ""; - const searchList = document.createElement("ul"); - searchList.classList.add("search"); - - const out = document.getElementById("search-results"); - Search.title = out.appendChild(searchText); - Search.dots = Search.title.appendChild(document.createElement("span")); - Search.status = out.appendChild(searchSummary); - Search.output = out.appendChild(searchList); - - const searchProgress = document.getElementById("search-progress"); - // Some themes don't use the search progress node - if (searchProgress) { - searchProgress.innerText = _("Preparing search..."); - } - Search.startPulse(); - - // index already loaded, the browser was quick! 
- if (Search.hasIndex()) Search.query(query); - else Search.deferQuery(query); - }, - - /** - * execute search (requires search index to be loaded) - */ - query: (query) => { - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const titles = Search._index.titles; - const allTitles = Search._index.alltitles; - const indexEntries = Search._index.indexentries; - - // stem the search terms and add them to the correct list - const stemmer = new Stemmer(); - const searchTerms = new Set(); - const excludedTerms = new Set(); - const highlightTerms = new Set(); - const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); - splitQuery(query.trim()).forEach((queryTerm) => { - const queryTermLower = queryTerm.toLowerCase(); - - // maybe skip this "word" - // stopwords array is from language_data.js - if ( - stopwords.indexOf(queryTermLower) !== -1 || - queryTerm.match(/^\d+$/) - ) - return; - - // stem the word - let word = stemmer.stemWord(queryTermLower); - // select the correct list - if (word[0] === "-") excludedTerms.add(word.substr(1)); - else { - searchTerms.add(word); - highlightTerms.add(queryTermLower); - } - }); - - if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js - localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) - } - - // console.debug("SEARCH: searching for:"); - // console.info("required: ", [...searchTerms]); - // console.info("excluded: ", [...excludedTerms]); - - // array of [docname, title, anchor, descr, score, filename] - let results = []; - _removeChildren(document.getElementById("search-progress")); - - const queryLower = query.toLowerCase(); - for (const [title, foundTitles] of Object.entries(allTitles)) { - if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { - for (const [file, id] of foundTitles) { - let score = Math.round(100 * queryLower.length / title.length) - results.push([ - docNames[file], - titles[file] !== title ? `${titles[file]} > ${title}` : title, - id !== null ? "#" + id : "", - null, - score, - filenames[file], - ]); - } - } - } - - // search for explicit entries in index directives - for (const [entry, foundEntries] of Object.entries(indexEntries)) { - if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { - for (const [file, id] of foundEntries) { - let score = Math.round(100 * queryLower.length / entry.length) - results.push([ - docNames[file], - titles[file], - id ? "#" + id : "", - null, - score, - filenames[file], - ]); - } - } - } - - // lookup as object - objectTerms.forEach((term) => - results.push(...Search.performObjectSearch(term, objectTerms)) - ); - - // lookup as search terms in fulltext - results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); - - // let the scorer override scores with a custom scoring function - if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); - - // now sort the results by score (in opposite order of appearance, since the - // display function below uses pop() to retrieve items) and then - // alphabetically - results.sort((a, b) => { - const leftScore = a[4]; - const rightScore = b[4]; - if (leftScore === rightScore) { - // same score: sort alphabetically - const leftTitle = a[1].toLowerCase(); - const rightTitle = b[1].toLowerCase(); - if (leftTitle === rightTitle) return 0; - return leftTitle > rightTitle ? -1 : 1; // inverted is intentional - } - return leftScore > rightScore ? 
1 : -1; - }); - - // remove duplicate search results - // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept - let seen = new Set(); - results = results.reverse().reduce((acc, result) => { - let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); - if (!seen.has(resultStr)) { - acc.push(result); - seen.add(resultStr); - } - return acc; - }, []); - - results = results.reverse(); - - // for debugging - //Search.lastresults = results.slice(); // a copy - // console.info("search results:", Search.lastresults); - - // print the results - _displayNextItem(results, results.length, searchTerms); - }, - - /** - * search for object names - */ - performObjectSearch: (object, objectTerms) => { - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const objects = Search._index.objects; - const objNames = Search._index.objnames; - const titles = Search._index.titles; - - const results = []; - - const objectSearchCallback = (prefix, match) => { - const name = match[4] - const fullname = (prefix ? prefix + "." : "") + name; - const fullnameLower = fullname.toLowerCase(); - if (fullnameLower.indexOf(object) < 0) return; - - let score = 0; - const parts = fullnameLower.split("."); - - // check for different match types: exact matches of full name or - // "last name" (i.e. last dotted part) - if (fullnameLower === object || parts.slice(-1)[0] === object) - score += Scorer.objNameMatch; - else if (parts.slice(-1)[0].indexOf(object) > -1) - score += Scorer.objPartialMatch; // matches in last name - - const objName = objNames[match[1]][2]; - const title = titles[match[0]]; - - // If more than one term searched for, we require other words to be - // found in the name/title/description - const otherTerms = new Set(objectTerms); - otherTerms.delete(object); - if (otherTerms.size > 0) { - const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); - if ( - [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) - ) - return; - } - - let anchor = match[3]; - if (anchor === "") anchor = fullname; - else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; - - const descr = objName + _(", in ") + title; - - // add custom score for some objects according to scorer - if (Scorer.objPrio.hasOwnProperty(match[2])) - score += Scorer.objPrio[match[2]]; - else score += Scorer.objPrioDefault; - - results.push([ - docNames[match[0]], - fullname, - "#" + anchor, - descr, - score, - filenames[match[0]], - ]); - }; - Object.keys(objects).forEach((prefix) => - objects[prefix].forEach((array) => - objectSearchCallback(prefix, array) - ) - ); - return results; - }, - - /** - * search for full-text terms in the index - */ - performTermsSearch: (searchTerms, excludedTerms) => { - // prepare search - const terms = Search._index.terms; - const titleTerms = Search._index.titleterms; - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const titles = Search._index.titles; - - const scoreMap = new Map(); - const fileMap = new Map(); - - // perform the search on the required terms - searchTerms.forEach((word) => { - const files = []; - const arr = [ - { files: terms[word], score: Scorer.term }, - { files: titleTerms[word], score: Scorer.title }, - ]; - // add support for partial matches - if (word.length > 2) { - const escapedWord = _escapeRegExp(word); - Object.keys(terms).forEach((term) => { - if (term.match(escapedWord) && !terms[word]) - arr.push({ 
files: terms[term], score: Scorer.partialTerm }); - }); - Object.keys(titleTerms).forEach((term) => { - if (term.match(escapedWord) && !titleTerms[word]) - arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); - }); - } - - // no match but word was a required one - if (arr.every((record) => record.files === undefined)) return; - - // found search word in contents - arr.forEach((record) => { - if (record.files === undefined) return; - - let recordFiles = record.files; - if (recordFiles.length === undefined) recordFiles = [recordFiles]; - files.push(...recordFiles); - - // set score for the word in each file - recordFiles.forEach((file) => { - if (!scoreMap.has(file)) scoreMap.set(file, {}); - scoreMap.get(file)[word] = record.score; - }); - }); - - // create the mapping - files.forEach((file) => { - if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) - fileMap.get(file).push(word); - else fileMap.set(file, [word]); - }); - }); - - // now check if the files don't contain excluded terms - const results = []; - for (const [file, wordList] of fileMap) { - // check if all requirements are matched - - // as search terms with length < 3 are discarded - const filteredTermCount = [...searchTerms].filter( - (term) => term.length > 2 - ).length; - if ( - wordList.length !== searchTerms.size && - wordList.length !== filteredTermCount - ) - continue; - - // ensure that none of the excluded terms is in the search result - if ( - [...excludedTerms].some( - (term) => - terms[term] === file || - titleTerms[term] === file || - (terms[term] || []).includes(file) || - (titleTerms[term] || []).includes(file) - ) - ) - break; - - // select one (max) score for the file. - const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); - // add result to the result list - results.push([ - docNames[file], - titles[file], - "", - null, - score, - filenames[file], - ]); - } - return results; - }, - - /** - * helper function to return a node containing the - * search summary for a given text. keywords is a list - * of stemmed words. - */ - makeSearchSummary: (htmlText, keywords) => { - const text = Search.htmlToText(htmlText); - if (text === "") return null; - - const textLower = text.toLowerCase(); - const actualStartPosition = [...keywords] - .map((k) => textLower.indexOf(k.toLowerCase())) - .filter((i) => i > -1) - .slice(-1)[0]; - const startWithContext = Math.max(actualStartPosition - 120, 0); - - const top = startWithContext === 0 ? "" : "..."; - const tail = startWithContext + 240 < text.length ? "..." 
: ""; - - let summary = document.createElement("p"); - summary.classList.add("context"); - summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; - - return summary; - }, -}; - -_ready(Search.init); diff --git a/_static/sphinx-design.4cbf315f70debaebd550c87a6162cf0f.min.css b/_static/sphinx-design.4cbf315f70debaebd550c87a6162cf0f.min.css deleted file mode 100644 index 860c36da0..000000000 --- a/_static/sphinx-design.4cbf315f70debaebd550c87a6162cf0f.min.css +++ /dev/null @@ -1 +0,0 @@ -.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text) !important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) 
!important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) !important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) !important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px 
!important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem !important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 
!important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none !important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex 
!important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset !important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset !important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid !important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 
!important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) 
!important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) !important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) 
!important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid 
!important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x 
mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 auto;-ms-flex:0 0 
auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 
auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative;font-size:var(--sd-fontsize-dropdown)}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary.sd-summary-title{padding:.5em .6em .5em 1em;font-size:var(--sd-fontsize-dropdown-title);font-weight:var(--sd-fontweight-dropdown-title);user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;list-style:none;display:inline-flex;justify-content:space-between}details.sd-dropdown summary.sd-summary-title::-webkit-details-marker{display:none}details.sd-dropdown summary.sd-summary-title:focus{outline:none}details.sd-dropdown summary.sd-summary-title .sd-summary-icon{margin-right:.6em;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary.sd-summary-title .sd-summary-text{flex-grow:1;line-height:1.5;padding-right:.5rem}details.sd-dropdown summary.sd-summary-title 
.sd-summary-state-marker{pointer-events:none;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker svg{opacity:.6}details.sd-dropdown summary.sd-summary-title:hover .sd-summary-state-marker svg{opacity:1;transform:scale(1.1)}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown .sd-summary-chevron-right{transition:.25s}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-right{transform:rotate(90deg)}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-down{transform:rotate(180deg)}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #0071bc;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0060a0;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: 
#5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-bg: rgba(0, 113, 188, 0.2);--sd-color-secondary-bg: rgba(108, 117, 125, 0.2);--sd-color-success-bg: rgba(40, 167, 69, 0.2);--sd-color-info-bg: rgba(23, 162, 184, 0.2);--sd-color-warning-bg: rgba(240, 179, 126, 0.2);--sd-color-danger-bg: rgba(220, 53, 69, 0.2);--sd-color-light-bg: rgba(248, 249, 250, 0.2);--sd-color-muted-bg: rgba(108, 117, 125, 0.2);--sd-color-dark-bg: rgba(33, 37, 41, 0.2);--sd-color-black-bg: rgba(0, 0, 0, 0.2);--sd-color-white-bg: rgba(255, 255, 255, 0.2);--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem;--sd-fontsize-dropdown: inherit;--sd-fontsize-dropdown-title: 1rem;--sd-fontweight-dropdown-title: 700}
diff --git a/_static/sphinx_highlight.js b/_static/sphinx_highlight.js
deleted file mode 100644
index aae669d7e..000000000
--- a/_static/sphinx_highlight.js
+++ /dev/null
@@ -1,144 +0,0 @@
-/* Highlighting utilities for Sphinx HTML documentation. */
-"use strict";
-
-const SPHINX_HIGHLIGHT_ENABLED = true
-
-/**
- * highlight a given string on a node by wrapping it in
- * span elements with the given class name.
- */
-const _highlight = (node, addItems, text, className) => {
-  if (node.nodeType === Node.TEXT_NODE) {
-    const val = node.nodeValue;
-    const parent = node.parentNode;
-    const pos = val.toLowerCase().indexOf(text);
-    if (
-      pos >= 0 &&
-      !parent.classList.contains(className) &&
-      !parent.classList.contains("nohighlight")
-    ) {
-      let span;
-
-      const closestNode = parent.closest("body, svg, foreignObject");
-      const isInSVG = closestNode && closestNode.matches("svg");
-      if (isInSVG) {
-        span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
-      } else {
-        span = document.createElement("span");
-        span.classList.add(className);
-      }
-
-      span.appendChild(document.createTextNode(val.substr(pos, text.length)));
-      parent.insertBefore(
-        span,
-        parent.insertBefore(
-          document.createTextNode(val.substr(pos + text.length)),
-          node.nextSibling
-        )
-      );
-      node.nodeValue = val.substr(0, pos);
-
-      if (isInSVG) {
-        const rect = document.createElementNS(
-          "http://www.w3.org/2000/svg",
-          "rect"
-        );
-        const bbox = parent.getBBox();
-        rect.x.baseVal.value = bbox.x;
-        rect.y.baseVal.value = bbox.y;
-        rect.width.baseVal.value = bbox.width;
-        rect.height.baseVal.value = bbox.height;
-        rect.setAttribute("class", className);
-        addItems.push({ parent: parent, target: rect });
-      }
-    }
-  } else if (node.matches && !node.matches("button, select, textarea")) {
-    node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
-  }
-};
-const _highlightText = (thisNode, text, className) => {
-  let addItems = [];
-  _highlight(thisNode, addItems, text, className);
-  addItems.forEach((obj) =>
-    obj.parent.insertAdjacentElement("beforebegin", obj.target)
-  );
-};
-
-/**
- * Small JavaScript module for the documentation.
- */
-const SphinxHighlight = {
-
-  /**
-   * highlight the search words provided in localstorage in the text
-   */
-  highlightSearchWords: () => {
-    if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
-
-    // get and clear terms from localstorage
-    const url = new URL(window.location);
-    const highlight =
-      localStorage.getItem("sphinx_highlight_terms")
-      || url.searchParams.get("highlight")
-      || "";
-    localStorage.removeItem("sphinx_highlight_terms")
-    url.searchParams.delete("highlight");
-    window.history.replaceState({}, "", url);
-
-    // get individual terms from highlight string
-    const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
-    if (terms.length === 0) return; // nothing to do
-
-    // There should never be more than one element matching "div.body"
-    const divBody = document.querySelectorAll("div.body");
-    const body = divBody.length ? divBody[0] : document.querySelector("body");
-    window.setTimeout(() => {
-      terms.forEach((term) => _highlightText(body, term, "highlighted"));
-    }, 10);
-
-    const searchBox = document.getElementById("searchbox");
-    if (searchBox === null) return;
-    searchBox.appendChild(
-      document
-        .createRange()
-        .createContextualFragment(
-          '<p class="highlight-link">' +
-            '<a href="javascript:SphinxHighlight.hideSearchWords()">' +
-            _("Hide Search Matches") +
-            "</a></p>"
-        )
-    );
-  },
-
-  /**
-   * helper function to hide the search marks again
-   */
-  hideSearchWords: () => {
-    document
-      .querySelectorAll("#searchbox .highlight-link")
-      .forEach((el) => el.remove());
-    document
-      .querySelectorAll("span.highlighted")
-      .forEach((el) => el.classList.remove("highlighted"));
-    localStorage.removeItem("sphinx_highlight_terms")
-  },
-
-  initEscapeListener: () => {
-    // only install a listener if it is really needed
-    if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
-
-    document.addEventListener("keydown", (event) => {
-      // bail for input elements
-      if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
-      // bail with special keys
-      if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
-      if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
-        SphinxHighlight.hideSearchWords();
-        event.preventDefault();
-      }
-    });
-  },
-};
-
-_ready(SphinxHighlight.highlightSearchWords);
-_ready(SphinxHighlight.initEscapeListener);
diff --git a/_static/styles/bootstrap.css b/_static/styles/bootstrap.css
deleted file mode 100644
index b258fd9ea..000000000
--- a/_static/styles/bootstrap.css
+++ /dev/null
@@ -1,6 +0,0 @@
-/*!
- * Bootstrap v5.2.2 (https://getbootstrap.com/)
- * Copyright 2011-2022 The Bootstrap Authors
- * Copyright 2011-2022 Twitter, Inc.
- * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
- */:root{--bs-blue:#0d6efd;--bs-indigo:#6610f2;--bs-purple:#6f42c1;--bs-pink:#d63384;--bs-red:#dc3545;--bs-orange:#fd7e14;--bs-yellow:#ffc107;--bs-green:#198754;--bs-teal:#20c997;--bs-cyan:#0dcaf0;--bs-black:#000;--bs-white:#fff;--bs-gray:#6c757d;--bs-gray-dark:#343a40;--bs-gray-100:#f8f9fa;--bs-gray-200:#e9ecef;--bs-gray-300:#dee2e6;--bs-gray-400:#ced4da;--bs-gray-500:#adb5bd;--bs-gray-600:#6c757d;--bs-gray-700:#495057;--bs-gray-800:#343a40;--bs-gray-900:#212529;--bs-primary:#0d6efd;--bs-secondary:#6c757d;--bs-success:#198754;--bs-info:#0dcaf0;--bs-warning:#ffc107;--bs-danger:#dc3545;--bs-light:#f8f9fa;--bs-dark:#212529;--bs-primary-rgb:13,110,253;--bs-secondary-rgb:108,117,125;--bs-success-rgb:25,135,84;--bs-info-rgb:13,202,240;--bs-warning-rgb:255,193,7;--bs-danger-rgb:220,53,69;--bs-light-rgb:248,249,250;--bs-dark-rgb:33,37,41;--bs-white-rgb:255,255,255;--bs-black-rgb:0,0,0;--bs-body-color-rgb:33,37,41;--bs-body-bg-rgb:255,255,255;--bs-font-sans-serif:system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue","Noto Sans","Liberation Sans",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--bs-font-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier
New",monospace;--bs-gradient:linear-gradient(180deg,hsla(0,0%,100%,.15),hsla(0,0%,100%,0));--bs-body-font-family:var(--bs-font-sans-serif);--bs-body-font-size:1rem;--bs-body-font-weight:400;--bs-body-line-height:1.5;--bs-body-color:#212529;--bs-body-bg:#fff;--bs-border-width:1px;--bs-border-style:solid;--bs-border-color:#dee2e6;--bs-border-color-translucent:rgba(0,0,0,.175);--bs-border-radius:.375rem;--bs-border-radius-sm:.25rem;--bs-border-radius-lg:.5rem;--bs-border-radius-xl:1rem;--bs-border-radius-2xl:2rem;--bs-border-radius-pill:50rem;--bs-link-color:#0d6efd;--bs-link-hover-color:#0a58ca;--bs-code-color:#d63384;--bs-highlight-bg:#fff3cd}*,:after,:before{box-sizing:border-box}@media (prefers-reduced-motion:no-preference){:root{scroll-behavior:smooth}}body{-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:rgba(0,0,0,0);background-color:var(--bs-body-bg);color:var(--bs-body-color);font-family:var(--bs-body-font-family);font-size:var(--bs-body-font-size);font-weight:var(--bs-body-font-weight);line-height:var(--bs-body-line-height);margin:0;text-align:var(--bs-body-text-align)}hr{border:0;border-top:1px solid;color:inherit;margin:1rem 0;opacity:.25}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{font-weight:500;line-height:1.2;margin-bottom:.5rem;margin-top:0}.h1,h1{font-size:calc(1.375rem + 1.5vw)}@media (min-width:1200px){.h1,h1{font-size:2.5rem}}.h2,h2{font-size:calc(1.325rem + .9vw)}@media (min-width:1200px){.h2,h2{font-size:2rem}}.h3,h3{font-size:calc(1.3rem + .6vw)}@media (min-width:1200px){.h3,h3{font-size:1.75rem}}.h4,h4{font-size:calc(1.275rem + .3vw)}@media (min-width:1200px){.h4,h4{font-size:1.5rem}}.h5,h5{font-size:1.25rem}.h6,h6{font-size:1rem}p{margin-bottom:1rem;margin-top:0}abbr[title]{cursor:help;text-decoration:underline dotted;text-decoration-skip-ink:none}address{font-style:normal;line-height:inherit;margin-bottom:1rem}ol,ul{padding-left:2rem}dl,ol,ul{margin-bottom:1rem;margin-top:0}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}.small,small{font-size:.875em}.mark,mark{background-color:var(--bs-highlight-bg);padding:.1875em}sub,sup{font-size:.75em;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:var(--bs-link-color);text-decoration:underline}a:hover{color:var(--bs-link-hover-color)}a:not([href]):not([class]),a:not([href]):not([class]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:var(--bs-font-monospace);font-size:1em}pre{display:block;font-size:.875em;margin-bottom:1rem;margin-top:0;overflow:auto}pre code{color:inherit;font-size:inherit;word-break:normal}code{word-wrap:break-word;color:var(--bs-code-color);font-size:.875em}a>code{color:inherit}kbd{background-color:var(--bs-body-color);border-radius:.25rem;color:var(--bs-body-bg);font-size:.875em;padding:.1875rem .375rem}kbd kbd{font-size:1em;padding:0}figure{margin:0 0 1rem}img,svg{vertical-align:middle}table{border-collapse:collapse;caption-side:bottom}caption{color:#6c757d;padding-bottom:.5rem;padding-top:.5rem;text-align:left}th{text-align:inherit;text-align:-webkit-match-parent}tbody,td,tfoot,th,thead,tr{border:0 
solid;border-color:inherit}label{display:inline-block}button{border-radius:0}button:focus:not(:focus-visible){outline:0}button,input,optgroup,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit;margin:0}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}select:disabled{opacity:1}[list]:not([type=date]):not([type=datetime-local]):not([type=month]):not([type=week]):not([type=time])::-webkit-calendar-picker-indicator{display:none!important}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}::-moz-focus-inner{border-style:none;padding:0}textarea{resize:vertical}fieldset{border:0;margin:0;min-width:0;padding:0}legend{float:left;font-size:calc(1.275rem + .3vw);line-height:inherit;margin-bottom:.5rem;padding:0;width:100%}@media (min-width:1200px){legend{font-size:1.5rem}}legend+*{clear:left}::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-fields-wrapper,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-minute,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-text,::-webkit-datetime-edit-year-field{padding:0}::-webkit-inner-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-color-swatch-wrapper{padding:0}::file-selector-button{-webkit-appearance:button;font:inherit}output{display:inline-block}iframe{border:0}summary{cursor:pointer;display:list-item}progress{vertical-align:baseline}[hidden]{display:none!important}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:calc(1.625rem + 4.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-1{font-size:5rem}}.display-2{font-size:calc(1.575rem + 3.9vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-2{font-size:4.5rem}}.display-3{font-size:calc(1.525rem + 3.3vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-3{font-size:4rem}}.display-4{font-size:calc(1.475rem + 2.7vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-4{font-size:3.5rem}}.display-5{font-size:calc(1.425rem + 2.1vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-5{font-size:3rem}}.display-6{font-size:calc(1.375rem + 1.5vw);font-weight:300;line-height:1.2}@media (min-width:1200px){.display-6{font-size:2.5rem}}.list-inline,.list-unstyled{list-style:none;padding-left:0}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:.875em;text-transform:uppercase}.blockquote{font-size:1.25rem;margin-bottom:1rem}.blockquote>:last-child{margin-bottom:0}.blockquote-footer{color:#6c757d;font-size:.875em;margin-bottom:1rem;margin-top:-1rem}.blockquote-footer:before{content:"\2014\00A0"}.img-fluid,.img-thumbnail{height:auto;max-width:100%}.img-thumbnail{background-color:#fff;border:1px solid var(--bs-border-color);border-radius:.375rem;padding:.25rem}.figure{display:inline-block}.figure-img{line-height:1;margin-bottom:.5rem}.figure-caption{color:#6c757d;font-size:.875em}.container,.container-fluid,.container-lg,.container-md,.container-sm,.container-xl{--bs-gutter-x:1.5rem;--bs-gutter-y:0;margin-left:auto;margin-right:auto;padding-left:calc(var(--bs-gutter-x)*.5);padding-right:calc(var(--bs-gutter-x)*.5);width:100%}@media (min-width:540px){.container,.container-sm{max-width:540px}}@media 
var(--bs-accordion-border-color);color:var(--bs-accordion-active-color)}.accordion-button:not(.collapsed):after{background-image:var(--bs-accordion-btn-active-icon);transform:var(--bs-accordion-btn-icon-transform)}.accordion-button:after{background-image:var(--bs-accordion-btn-icon);background-repeat:no-repeat;background-size:var(--bs-accordion-btn-icon-width);content:"";flex-shrink:0;height:var(--bs-accordion-btn-icon-width);margin-left:auto;transition:var(--bs-accordion-btn-icon-transition);width:var(--bs-accordion-btn-icon-width)}@media (prefers-reduced-motion:reduce){.accordion-button:after{transition:none}}.accordion-button:hover{z-index:2}.accordion-button:focus{border-color:var(--bs-accordion-btn-focus-border-color);box-shadow:var(--bs-accordion-btn-focus-box-shadow);outline:0;z-index:3}.accordion-header{margin-bottom:0}.accordion-item{background-color:var(--bs-accordion-bg);border:var(--bs-accordion-border-width) solid var(--bs-accordion-border-color);color:var(--bs-accordion-color)}.accordion-item:first-of-type{border-top-left-radius:var(--bs-accordion-border-radius);border-top-right-radius:var(--bs-accordion-border-radius)}.accordion-item:first-of-type .accordion-button{border-top-left-radius:var(--bs-accordion-inner-border-radius);border-top-right-radius:var(--bs-accordion-inner-border-radius)}.accordion-item:not(:first-of-type){border-top:0}.accordion-item:last-of-type{border-bottom-left-radius:var(--bs-accordion-border-radius);border-bottom-right-radius:var(--bs-accordion-border-radius)}.accordion-item:last-of-type .accordion-button.collapsed{border-bottom-left-radius:var(--bs-accordion-inner-border-radius);border-bottom-right-radius:var(--bs-accordion-inner-border-radius)}.accordion-item:last-of-type .accordion-collapse{border-bottom-left-radius:var(--bs-accordion-border-radius);border-bottom-right-radius:var(--bs-accordion-border-radius)}.accordion-body{padding:var(--bs-accordion-body-padding-y) var(--bs-accordion-body-padding-x)}.accordion-flush .accordion-collapse{border-width:0}.accordion-flush .accordion-item{border-left:0;border-radius:0;border-right:0}.accordion-flush .accordion-item:first-child{border-top:0}.accordion-flush .accordion-item:last-child{border-bottom:0}.accordion-flush .accordion-item .accordion-button,.accordion-flush .accordion-item .accordion-button.collapsed{border-radius:0}.breadcrumb{--bs-breadcrumb-padding-x:0;--bs-breadcrumb-padding-y:0;--bs-breadcrumb-margin-bottom:1rem;--bs-breadcrumb-bg: ;--bs-breadcrumb-border-radius: ;--bs-breadcrumb-divider-color:#6c757d;--bs-breadcrumb-item-padding-x:.5rem;--bs-breadcrumb-item-active-color:#6c757d;background-color:var(--bs-breadcrumb-bg);border-radius:var(--bs-breadcrumb-border-radius);display:flex;flex-wrap:wrap;font-size:var(--bs-breadcrumb-font-size);list-style:none;margin-bottom:var(--bs-breadcrumb-margin-bottom);padding:var(--bs-breadcrumb-padding-y) 
var(--bs-breadcrumb-padding-x)}.breadcrumb-item+.breadcrumb-item{padding-left:var(--bs-breadcrumb-item-padding-x)}.breadcrumb-item+.breadcrumb-item:before{color:var(--bs-breadcrumb-divider-color);content:var(--bs-breadcrumb-divider,"/");float:left;padding-right:var(--bs-breadcrumb-item-padding-x)}.breadcrumb-item.active{color:var(--bs-breadcrumb-item-active-color)}.pagination{--bs-pagination-padding-x:.75rem;--bs-pagination-padding-y:.375rem;--bs-pagination-font-size:1rem;--bs-pagination-color:var(--bs-link-color);--bs-pagination-bg:#fff;--bs-pagination-border-width:1px;--bs-pagination-border-color:#dee2e6;--bs-pagination-border-radius:.375rem;--bs-pagination-hover-color:var(--bs-link-hover-color);--bs-pagination-hover-bg:#e9ecef;--bs-pagination-hover-border-color:#dee2e6;--bs-pagination-focus-color:var(--bs-link-hover-color);--bs-pagination-focus-bg:#e9ecef;--bs-pagination-focus-box-shadow:0 0 0 .25rem rgba(13,110,253,.25);--bs-pagination-active-color:#fff;--bs-pagination-active-bg:#0d6efd;--bs-pagination-active-border-color:#0d6efd;--bs-pagination-disabled-color:#6c757d;--bs-pagination-disabled-bg:#fff;--bs-pagination-disabled-border-color:#dee2e6;display:flex;list-style:none;padding-left:0}.page-link{background-color:var(--bs-pagination-bg);border:var(--bs-pagination-border-width) solid var(--bs-pagination-border-color);color:var(--bs-pagination-color);display:block;font-size:var(--bs-pagination-font-size);padding:var(--bs-pagination-padding-y) var(--bs-pagination-padding-x);position:relative;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.page-link{transition:none}}.page-link:hover{background-color:var(--bs-pagination-hover-bg);border-color:var(--bs-pagination-hover-border-color);color:var(--bs-pagination-hover-color);z-index:2}.page-link:focus{background-color:var(--bs-pagination-focus-bg);box-shadow:var(--bs-pagination-focus-box-shadow);color:var(--bs-pagination-focus-color);outline:0;z-index:3}.active>.page-link,.page-link.active{background-color:var(--bs-pagination-active-bg);border-color:var(--bs-pagination-active-border-color);color:var(--bs-pagination-active-color);z-index:3}.disabled>.page-link,.page-link.disabled{background-color:var(--bs-pagination-disabled-bg);border-color:var(--bs-pagination-disabled-border-color);color:var(--bs-pagination-disabled-color);pointer-events:none}.page-item:not(:first-child) .page-link{margin-left:-1px}.page-item:first-child .page-link{border-bottom-left-radius:var(--bs-pagination-border-radius);border-top-left-radius:var(--bs-pagination-border-radius)}.page-item:last-child .page-link{border-bottom-right-radius:var(--bs-pagination-border-radius);border-top-right-radius:var(--bs-pagination-border-radius)}.pagination-lg{--bs-pagination-padding-x:1.5rem;--bs-pagination-padding-y:.75rem;--bs-pagination-font-size:1.25rem;--bs-pagination-border-radius:.5rem}.pagination-sm{--bs-pagination-padding-x:.5rem;--bs-pagination-padding-y:.25rem;--bs-pagination-font-size:.875rem;--bs-pagination-border-radius:.25rem}.badge{--bs-badge-padding-x:.65em;--bs-badge-padding-y:.35em;--bs-badge-font-size:.75em;--bs-badge-font-weight:700;--bs-badge-color:#fff;--bs-badge-border-radius:.375rem;border-radius:var(--bs-badge-border-radius);color:var(--bs-badge-color);display:inline-block;font-size:var(--bs-badge-font-size);font-weight:var(--bs-badge-font-weight);line-height:1;padding:var(--bs-badge-padding-y) 
var(--bs-badge-padding-x);text-align:center;vertical-align:baseline;white-space:nowrap}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.alert{--bs-alert-bg:transparent;--bs-alert-padding-x:1rem;--bs-alert-padding-y:1rem;--bs-alert-margin-bottom:1rem;--bs-alert-color:inherit;--bs-alert-border-color:transparent;--bs-alert-border:1px solid var(--bs-alert-border-color);--bs-alert-border-radius:.375rem;background-color:var(--bs-alert-bg);border:var(--bs-alert-border);border-radius:var(--bs-alert-border-radius);color:var(--bs-alert-color);margin-bottom:var(--bs-alert-margin-bottom);padding:var(--bs-alert-padding-y) var(--bs-alert-padding-x);position:relative}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:3rem}.alert-dismissible .btn-close{padding:1.25rem 1rem;position:absolute;right:0;top:0;z-index:2}.alert-primary{--bs-alert-color:#084298;--bs-alert-bg:#cfe2ff;--bs-alert-border-color:#b6d4fe}.alert-primary .alert-link{color:#06357a}.alert-secondary{--bs-alert-color:#41464b;--bs-alert-bg:#e2e3e5;--bs-alert-border-color:#d3d6d8}.alert-secondary .alert-link{color:#34383c}.alert-success{--bs-alert-color:#0f5132;--bs-alert-bg:#d1e7dd;--bs-alert-border-color:#badbcc}.alert-success .alert-link{color:#0c4128}.alert-info{--bs-alert-color:#055160;--bs-alert-bg:#cff4fc;--bs-alert-border-color:#b6effb}.alert-info .alert-link{color:#04414d}.alert-warning{--bs-alert-color:#664d03;--bs-alert-bg:#fff3cd;--bs-alert-border-color:#ffecb5}.alert-warning .alert-link{color:#523e02}.alert-danger{--bs-alert-color:#842029;--bs-alert-bg:#f8d7da;--bs-alert-border-color:#f5c2c7}.alert-danger .alert-link{color:#6a1a21}.alert-light{--bs-alert-color:#636464;--bs-alert-bg:#fefefe;--bs-alert-border-color:#fdfdfe}.alert-light .alert-link{color:#4f5050}.alert-dark{--bs-alert-color:#141619;--bs-alert-bg:#d3d3d4;--bs-alert-border-color:#bcbebf}.alert-dark .alert-link{color:#101214}@keyframes progress-bar-stripes{0%{background-position-x:1rem}}.progress{--bs-progress-height:1rem;--bs-progress-font-size:.75rem;--bs-progress-bg:#e9ecef;--bs-progress-border-radius:.375rem;--bs-progress-box-shadow:inset 0 1px 2px rgba(0,0,0,.075);--bs-progress-bar-color:#fff;--bs-progress-bar-bg:#0d6efd;--bs-progress-bar-transition:width 0.6s ease;background-color:var(--bs-progress-bg);border-radius:var(--bs-progress-border-radius);font-size:var(--bs-progress-font-size);height:var(--bs-progress-height)}.progress,.progress-bar{display:flex;overflow:hidden}.progress-bar{background-color:var(--bs-progress-bar-bg);color:var(--bs-progress-bar-color);flex-direction:column;justify-content:center;text-align:center;transition:var(--bs-progress-bar-transition);white-space:nowrap}@media (prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,hsla(0,0%,100%,.15) 25%,transparent 0,transparent 50%,hsla(0,0%,100%,.15) 0,hsla(0,0%,100%,.15) 75%,transparent 0,transparent);background-size:var(--bs-progress-height) var(--bs-progress-height)}.progress-bar-animated{animation:progress-bar-stripes 1s linear infinite}@media 
(prefers-reduced-motion:reduce){.progress-bar-animated{animation:none}}.list-group{--bs-list-group-color:#212529;--bs-list-group-bg:#fff;--bs-list-group-border-color:rgba(0,0,0,.125);--bs-list-group-border-width:1px;--bs-list-group-border-radius:.375rem;--bs-list-group-item-padding-x:1rem;--bs-list-group-item-padding-y:.5rem;--bs-list-group-action-color:#495057;--bs-list-group-action-hover-color:#495057;--bs-list-group-action-hover-bg:#f8f9fa;--bs-list-group-action-active-color:#212529;--bs-list-group-action-active-bg:#e9ecef;--bs-list-group-disabled-color:#6c757d;--bs-list-group-disabled-bg:#fff;--bs-list-group-active-color:#fff;--bs-list-group-active-bg:#0d6efd;--bs-list-group-active-border-color:#0d6efd;border-radius:var(--bs-list-group-border-radius);display:flex;flex-direction:column;margin-bottom:0;padding-left:0}.list-group-numbered{counter-reset:section;list-style-type:none}.list-group-numbered>.list-group-item:before{content:counters(section,".") ". ";counter-increment:section}.list-group-item-action{color:var(--bs-list-group-action-color);text-align:inherit;width:100%}.list-group-item-action:focus,.list-group-item-action:hover{background-color:var(--bs-list-group-action-hover-bg);color:var(--bs-list-group-action-hover-color);text-decoration:none;z-index:1}.list-group-item-action:active{background-color:var(--bs-list-group-action-active-bg);color:var(--bs-list-group-action-active-color)}.list-group-item{background-color:var(--bs-list-group-bg);border:var(--bs-list-group-border-width) solid var(--bs-list-group-border-color);color:var(--bs-list-group-color);display:block;padding:var(--bs-list-group-item-padding-y) var(--bs-list-group-item-padding-x);position:relative;text-decoration:none}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-left-radius:inherit;border-bottom-right-radius:inherit}.list-group-item.disabled,.list-group-item:disabled{background-color:var(--bs-list-group-disabled-bg);color:var(--bs-list-group-disabled-color);pointer-events:none}.list-group-item.active{background-color:var(--bs-list-group-active-bg);border-color:var(--bs-list-group-active-border-color);color:var(--bs-list-group-active-color);z-index:2}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{border-top-width:var(--bs-list-group-border-width);margin-top:calc(var(--bs-list-group-border-width)*-1)}.list-group-horizontal{flex-direction:row}.list-group-horizontal>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal>.list-group-item:last-child:not(:first-child){border-bottom-left-radius:0;border-top-right-radius:var(--bs-list-group-border-radius)}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-left-width:0;border-top-width:var(--bs-list-group-border-width)}.list-group-horizontal>.list-group-item+.list-group-item.active{border-left-width:var(--bs-list-group-border-width);margin-left:calc(var(--bs-list-group-border-width)*-1)}@media 
(min-width:540px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-sm>.list-group-item:last-child:not(:first-child){border-bottom-left-radius:0;border-top-right-radius:var(--bs-list-group-border-radius)}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-left-width:0;border-top-width:var(--bs-list-group-border-width)}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{border-left-width:var(--bs-list-group-border-width);margin-left:calc(var(--bs-list-group-border-width)*-1)}}@media (min-width:720px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-md>.list-group-item:last-child:not(:first-child){border-bottom-left-radius:0;border-top-right-radius:var(--bs-list-group-border-radius)}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-left-width:0;border-top-width:var(--bs-list-group-border-width)}.list-group-horizontal-md>.list-group-item+.list-group-item.active{border-left-width:var(--bs-list-group-border-width);margin-left:calc(var(--bs-list-group-border-width)*-1)}}@media (min-width:960px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-lg>.list-group-item:last-child:not(:first-child){border-bottom-left-radius:0;border-top-right-radius:var(--bs-list-group-border-radius)}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-left-width:0;border-top-width:var(--bs-list-group-border-width)}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{border-left-width:var(--bs-list-group-border-width);margin-left:calc(var(--bs-list-group-border-width)*-1)}}@media (min-width:1200px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl>.list-group-item:first-child:not(:last-child){border-bottom-left-radius:var(--bs-list-group-border-radius);border-top-right-radius:0}.list-group-horizontal-xl>.list-group-item:last-child:not(:first-child){border-bottom-left-radius:0;border-top-right-radius:var(--bs-list-group-border-radius)}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-left-width:0;border-top-width:var(--bs-list-group-border-width)}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{border-left-width:var(--bs-list-group-border-width);margin-left:calc(var(--bs-list-group-border-width)*-1)}}.list-group-flush{border-radius:0}.list-group-flush>.list-group-item{border-width:0 0 
var(--bs-list-group-border-width)}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{background-color:#cfe2ff;color:#084298}.list-group-item-primary.list-group-item-action:focus,.list-group-item-primary.list-group-item-action:hover{background-color:#bacbe6;color:#084298}.list-group-item-primary.list-group-item-action.active{background-color:#084298;border-color:#084298;color:#fff}.list-group-item-secondary{background-color:#e2e3e5;color:#41464b}.list-group-item-secondary.list-group-item-action:focus,.list-group-item-secondary.list-group-item-action:hover{background-color:#cbccce;color:#41464b}.list-group-item-secondary.list-group-item-action.active{background-color:#41464b;border-color:#41464b;color:#fff}.list-group-item-success{background-color:#d1e7dd;color:#0f5132}.list-group-item-success.list-group-item-action:focus,.list-group-item-success.list-group-item-action:hover{background-color:#bcd0c7;color:#0f5132}.list-group-item-success.list-group-item-action.active{background-color:#0f5132;border-color:#0f5132;color:#fff}.list-group-item-info{background-color:#cff4fc;color:#055160}.list-group-item-info.list-group-item-action:focus,.list-group-item-info.list-group-item-action:hover{background-color:#badce3;color:#055160}.list-group-item-info.list-group-item-action.active{background-color:#055160;border-color:#055160;color:#fff}.list-group-item-warning{background-color:#fff3cd;color:#664d03}.list-group-item-warning.list-group-item-action:focus,.list-group-item-warning.list-group-item-action:hover{background-color:#e6dbb9;color:#664d03}.list-group-item-warning.list-group-item-action.active{background-color:#664d03;border-color:#664d03;color:#fff}.list-group-item-danger{background-color:#f8d7da;color:#842029}.list-group-item-danger.list-group-item-action:focus,.list-group-item-danger.list-group-item-action:hover{background-color:#dfc2c4;color:#842029}.list-group-item-danger.list-group-item-action.active{background-color:#842029;border-color:#842029;color:#fff}.list-group-item-light{background-color:#fefefe;color:#636464}.list-group-item-light.list-group-item-action:focus,.list-group-item-light.list-group-item-action:hover{background-color:#e5e5e5;color:#636464}.list-group-item-light.list-group-item-action.active{background-color:#636464;border-color:#636464;color:#fff}.list-group-item-dark{background-color:#d3d3d4;color:#141619}.list-group-item-dark.list-group-item-action:focus,.list-group-item-dark.list-group-item-action:hover{background-color:#bebebf;color:#141619}.list-group-item-dark.list-group-item-action.active{background-color:#141619;border-color:#141619;color:#fff}.btn-close{background:transparent url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3E%3Cpath d='M.293.293a1 1 0 0 1 1.414 0L8 6.586 14.293.293a1 1 0 1 1 1.414 1.414L9.414 8l6.293 6.293a1 1 0 0 1-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 0 1-1.414-1.414L6.586 8 .293 1.707a1 1 0 0 1 0-1.414z'/%3E%3C/svg%3E") 50%/1em auto no-repeat;border:0;border-radius:.375rem;box-sizing:content-box;color:#000;height:1em;opacity:.5;padding:.25em;width:1em}.btn-close:hover{color:#000;opacity:.75;text-decoration:none}.btn-close:focus{box-shadow:0 0 0 .25rem rgba(13,110,253,.25);opacity:1;outline:0}.btn-close.disabled,.btn-close:disabled{opacity:.25;pointer-events:none;user-select:none}.btn-close-white{filter:invert(1) grayscale(100%) 
brightness(200%)}.toast{--bs-toast-zindex:1090;--bs-toast-padding-x:.75rem;--bs-toast-padding-y:.5rem;--bs-toast-spacing:1.5rem;--bs-toast-max-width:350px;--bs-toast-font-size:.875rem;--bs-toast-color: ;--bs-toast-bg:hsla(0,0%,100%,.85);--bs-toast-border-width:1px;--bs-toast-border-color:var(--bs-border-color-translucent);--bs-toast-border-radius:.375rem;--bs-toast-box-shadow:0 0.5rem 1rem rgba(0,0,0,.15);--bs-toast-header-color:#6c757d;--bs-toast-header-bg:hsla(0,0%,100%,.85);--bs-toast-header-border-color:rgba(0,0,0,.05);background-clip:padding-box;background-color:var(--bs-toast-bg);border:var(--bs-toast-border-width) solid var(--bs-toast-border-color);border-radius:var(--bs-toast-border-radius);box-shadow:var(--bs-toast-box-shadow);color:var(--bs-toast-color);font-size:var(--bs-toast-font-size);max-width:100%;pointer-events:auto;width:var(--bs-toast-max-width)}.toast.showing{opacity:0}.toast:not(.show){display:none}.toast-container{--bs-toast-zindex:1090;max-width:100%;pointer-events:none;position:absolute;width:max-content;z-index:var(--bs-toast-zindex)}.toast-container>:not(:last-child){margin-bottom:var(--bs-toast-spacing)}.toast-header{align-items:center;background-clip:padding-box;background-color:var(--bs-toast-header-bg);border-bottom:var(--bs-toast-border-width) solid var(--bs-toast-header-border-color);border-top-left-radius:calc(var(--bs-toast-border-radius) - var(--bs-toast-border-width));border-top-right-radius:calc(var(--bs-toast-border-radius) - var(--bs-toast-border-width));color:var(--bs-toast-header-color);display:flex;padding:var(--bs-toast-padding-y) var(--bs-toast-padding-x)}.toast-header .btn-close{margin-left:var(--bs-toast-padding-x);margin-right:calc(var(--bs-toast-padding-x)*-.5)}.toast-body{word-wrap:break-word;padding:var(--bs-toast-padding-x)}.modal{--bs-modal-zindex:1055;--bs-modal-width:500px;--bs-modal-padding:1rem;--bs-modal-margin:.5rem;--bs-modal-color: ;--bs-modal-bg:#fff;--bs-modal-border-color:var(--bs-border-color-translucent);--bs-modal-border-width:1px;--bs-modal-border-radius:.5rem;--bs-modal-box-shadow:0 0.125rem 0.25rem rgba(0,0,0,.075);--bs-modal-inner-border-radius:calc(0.5rem - 1px);--bs-modal-header-padding-x:1rem;--bs-modal-header-padding-y:1rem;--bs-modal-header-padding:1rem 1rem;--bs-modal-header-border-color:var(--bs-border-color);--bs-modal-header-border-width:1px;--bs-modal-title-line-height:1.5;--bs-modal-footer-gap:.5rem;--bs-modal-footer-bg: ;--bs-modal-footer-border-color:var(--bs-border-color);--bs-modal-footer-border-width:1px;display:none;height:100%;left:0;outline:0;overflow-x:hidden;overflow-y:auto;position:fixed;top:0;width:100%;z-index:var(--bs-modal-zindex)}.modal-dialog{margin:var(--bs-modal-margin);pointer-events:none;position:relative;width:auto}.modal.fade .modal-dialog{transform:translateY(-50px);transition:transform .3s ease-out}@media (prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{height:calc(100% - var(--bs-modal-margin)*2)}.modal-dialog-scrollable .modal-content{max-height:100%;overflow:hidden}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{align-items:center;display:flex;min-height:calc(100% - var(--bs-modal-margin)*2)}.modal-content{background-clip:padding-box;background-color:var(--bs-modal-bg);border:var(--bs-modal-border-width) solid 
var(--bs-modal-border-color);border-radius:var(--bs-modal-border-radius);color:var(--bs-modal-color);display:flex;flex-direction:column;outline:0;pointer-events:auto;position:relative;width:100%}.modal-backdrop{--bs-backdrop-zindex:1050;--bs-backdrop-bg:#000;--bs-backdrop-opacity:.5;background-color:var(--bs-backdrop-bg);height:100vh;left:0;position:fixed;top:0;width:100vw;z-index:var(--bs-backdrop-zindex)}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:var(--bs-backdrop-opacity)}.modal-header{align-items:center;border-bottom:var(--bs-modal-header-border-width) solid var(--bs-modal-header-border-color);border-top-left-radius:var(--bs-modal-inner-border-radius);border-top-right-radius:var(--bs-modal-inner-border-radius);display:flex;flex-shrink:0;justify-content:space-between;padding:var(--bs-modal-header-padding)}.modal-header .btn-close{margin:calc(var(--bs-modal-header-padding-y)*-.5) calc(var(--bs-modal-header-padding-x)*-.5) calc(var(--bs-modal-header-padding-y)*-.5) auto;padding:calc(var(--bs-modal-header-padding-y)*.5) calc(var(--bs-modal-header-padding-x)*.5)}.modal-title{line-height:var(--bs-modal-title-line-height);margin-bottom:0}.modal-body{flex:1 1 auto;padding:var(--bs-modal-padding);position:relative}.modal-footer{align-items:center;background-color:var(--bs-modal-footer-bg);border-bottom-left-radius:var(--bs-modal-inner-border-radius);border-bottom-right-radius:var(--bs-modal-inner-border-radius);border-top:var(--bs-modal-footer-border-width) solid var(--bs-modal-footer-border-color);display:flex;flex-shrink:0;flex-wrap:wrap;justify-content:flex-end;padding:calc(var(--bs-modal-padding) - var(--bs-modal-footer-gap)*.5)}.modal-footer>*{margin:calc(var(--bs-modal-footer-gap)*.5)}@media (min-width:540px){.modal{--bs-modal-margin:1.75rem;--bs-modal-box-shadow:0 0.5rem 1rem rgba(0,0,0,.15)}.modal-dialog{margin-left:auto;margin-right:auto;max-width:var(--bs-modal-width)}.modal-sm{--bs-modal-width:300px}}@media (min-width:960px){.modal-lg,.modal-xl{--bs-modal-width:800px}}@media (min-width:1200px){.modal-xl{--bs-modal-width:1140px}}.modal-fullscreen{height:100%;margin:0;max-width:none;width:100vw}.modal-fullscreen .modal-content{border:0;border-radius:0;height:100%}.modal-fullscreen .modal-footer,.modal-fullscreen .modal-header{border-radius:0}.modal-fullscreen .modal-body{overflow-y:auto}@media (max-width:539.98px){.modal-fullscreen-sm-down{height:100%;margin:0;max-width:none;width:100vw}.modal-fullscreen-sm-down .modal-content{border:0;border-radius:0;height:100%}.modal-fullscreen-sm-down .modal-footer,.modal-fullscreen-sm-down .modal-header{border-radius:0}.modal-fullscreen-sm-down .modal-body{overflow-y:auto}}@media (max-width:719.98px){.modal-fullscreen-md-down{height:100%;margin:0;max-width:none;width:100vw}.modal-fullscreen-md-down .modal-content{border:0;border-radius:0;height:100%}.modal-fullscreen-md-down .modal-footer,.modal-fullscreen-md-down .modal-header{border-radius:0}.modal-fullscreen-md-down .modal-body{overflow-y:auto}}@media (max-width:959.98px){.modal-fullscreen-lg-down{height:100%;margin:0;max-width:none;width:100vw}.modal-fullscreen-lg-down .modal-content{border:0;border-radius:0;height:100%}.modal-fullscreen-lg-down .modal-footer,.modal-fullscreen-lg-down .modal-header{border-radius:0}.modal-fullscreen-lg-down .modal-body{overflow-y:auto}}@media (max-width:1199.98px){.modal-fullscreen-xl-down{height:100%;margin:0;max-width:none;width:100vw}.modal-fullscreen-xl-down .modal-content{border:0;border-radius:0;height:100%}.modal-fullscreen-xl-down 
.modal-footer,.modal-fullscreen-xl-down .modal-header{border-radius:0}.modal-fullscreen-xl-down .modal-body{overflow-y:auto}}.tooltip{--bs-tooltip-zindex:1080;--bs-tooltip-max-width:200px;--bs-tooltip-padding-x:.5rem;--bs-tooltip-padding-y:.25rem;--bs-tooltip-margin: ;--bs-tooltip-font-size:.875rem;--bs-tooltip-color:#fff;--bs-tooltip-bg:#000;--bs-tooltip-border-radius:.375rem;--bs-tooltip-opacity:.9;--bs-tooltip-arrow-width:.8rem;--bs-tooltip-arrow-height:.4rem;word-wrap:break-word;display:block;font-family:var(--bs-font-sans-serif);font-size:var(--bs-tooltip-font-size);font-style:normal;font-weight:400;letter-spacing:normal;line-break:auto;line-height:1.5;margin:var(--bs-tooltip-margin);opacity:0;padding:var(--bs-tooltip-arrow-height);text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;white-space:normal;word-break:normal;word-spacing:normal;z-index:var(--bs-tooltip-zindex)}.tooltip.show{opacity:var(--bs-tooltip-opacity)}.tooltip .tooltip-arrow{display:block;height:var(--bs-tooltip-arrow-height);width:var(--bs-tooltip-arrow-width)}.tooltip .tooltip-arrow:before{border-color:transparent;border-style:solid;content:"";position:absolute}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow,.bs-tooltip-top .tooltip-arrow{bottom:0}.bs-tooltip-auto[data-popper-placement^=top] .tooltip-arrow:before,.bs-tooltip-top .tooltip-arrow:before{border-top-color:var(--bs-tooltip-bg);border-width:var(--bs-tooltip-arrow-height) calc(var(--bs-tooltip-arrow-width)*.5) 0;top:-1px}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow,.bs-tooltip-end .tooltip-arrow{height:var(--bs-tooltip-arrow-width);left:0;width:var(--bs-tooltip-arrow-height)}.bs-tooltip-auto[data-popper-placement^=right] .tooltip-arrow:before,.bs-tooltip-end .tooltip-arrow:before{border-right-color:var(--bs-tooltip-bg);border-width:calc(var(--bs-tooltip-arrow-width)*.5) var(--bs-tooltip-arrow-height) calc(var(--bs-tooltip-arrow-width)*.5) 0;right:-1px}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow,.bs-tooltip-bottom .tooltip-arrow{top:0}.bs-tooltip-auto[data-popper-placement^=bottom] .tooltip-arrow:before,.bs-tooltip-bottom .tooltip-arrow:before{border-bottom-color:var(--bs-tooltip-bg);border-width:0 calc(var(--bs-tooltip-arrow-width)*.5) var(--bs-tooltip-arrow-height);bottom:-1px}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow,.bs-tooltip-start .tooltip-arrow{height:var(--bs-tooltip-arrow-width);right:0;width:var(--bs-tooltip-arrow-height)}.bs-tooltip-auto[data-popper-placement^=left] .tooltip-arrow:before,.bs-tooltip-start .tooltip-arrow:before{border-left-color:var(--bs-tooltip-bg);border-width:calc(var(--bs-tooltip-arrow-width)*.5) 0 calc(var(--bs-tooltip-arrow-width)*.5) var(--bs-tooltip-arrow-height);left:-1px}.tooltip-inner{background-color:var(--bs-tooltip-bg);border-radius:var(--bs-tooltip-border-radius);color:var(--bs-tooltip-color);max-width:var(--bs-tooltip-max-width);padding:var(--bs-tooltip-padding-y) var(--bs-tooltip-padding-x);text-align:center}.popover{--bs-popover-zindex:1070;--bs-popover-max-width:276px;--bs-popover-font-size:.875rem;--bs-popover-bg:#fff;--bs-popover-border-width:1px;--bs-popover-border-color:var(--bs-border-color-translucent);--bs-popover-border-radius:.5rem;--bs-popover-inner-border-radius:calc(0.5rem - 1px);--bs-popover-box-shadow:0 0.5rem 1rem rgba(0,0,0,.15);--bs-popover-header-padding-x:1rem;--bs-popover-header-padding-y:.5rem;--bs-popover-header-font-size:1rem;--bs-popover-header-color: 
;--bs-popover-header-bg:#f0f0f0;--bs-popover-body-padding-x:1rem;--bs-popover-body-padding-y:1rem;--bs-popover-body-color:#212529;--bs-popover-arrow-width:1rem;--bs-popover-arrow-height:.5rem;--bs-popover-arrow-border:var(--bs-popover-border-color);word-wrap:break-word;background-clip:padding-box;background-color:var(--bs-popover-bg);border:var(--bs-popover-border-width) solid var(--bs-popover-border-color);border-radius:var(--bs-popover-border-radius);display:block;font-family:var(--bs-font-sans-serif);font-size:var(--bs-popover-font-size);font-style:normal;font-weight:400;letter-spacing:normal;line-break:auto;line-height:1.5;max-width:var(--bs-popover-max-width);text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;white-space:normal;word-break:normal;word-spacing:normal;z-index:var(--bs-popover-zindex)}.popover .popover-arrow{display:block;height:var(--bs-popover-arrow-height);width:var(--bs-popover-arrow-width)}.popover .popover-arrow:after,.popover .popover-arrow:before{border:0 solid transparent;content:"";display:block;position:absolute}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow,.bs-popover-top>.popover-arrow{bottom:calc((var(--bs-popover-arrow-height))*-1 - var(--bs-popover-border-width))}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow:after,.bs-popover-auto[data-popper-placement^=top]>.popover-arrow:before,.bs-popover-top>.popover-arrow:after,.bs-popover-top>.popover-arrow:before{border-width:var(--bs-popover-arrow-height) calc(var(--bs-popover-arrow-width)*.5) 0}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow:before,.bs-popover-top>.popover-arrow:before{border-top-color:var(--bs-popover-arrow-border);bottom:0}.bs-popover-auto[data-popper-placement^=top]>.popover-arrow:after,.bs-popover-top>.popover-arrow:after{border-top-color:var(--bs-popover-bg);bottom:var(--bs-popover-border-width)}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow,.bs-popover-end>.popover-arrow{height:var(--bs-popover-arrow-width);left:calc((var(--bs-popover-arrow-height))*-1 - var(--bs-popover-border-width));width:var(--bs-popover-arrow-height)}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow:after,.bs-popover-auto[data-popper-placement^=right]>.popover-arrow:before,.bs-popover-end>.popover-arrow:after,.bs-popover-end>.popover-arrow:before{border-width:calc(var(--bs-popover-arrow-width)*.5) var(--bs-popover-arrow-height) calc(var(--bs-popover-arrow-width)*.5) 0}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow:before,.bs-popover-end>.popover-arrow:before{border-right-color:var(--bs-popover-arrow-border);left:0}.bs-popover-auto[data-popper-placement^=right]>.popover-arrow:after,.bs-popover-end>.popover-arrow:after{border-right-color:var(--bs-popover-bg);left:var(--bs-popover-border-width)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow,.bs-popover-bottom>.popover-arrow{top:calc((var(--bs-popover-arrow-height))*-1 - var(--bs-popover-border-width))}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow:after,.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow:before,.bs-popover-bottom>.popover-arrow:after,.bs-popover-bottom>.popover-arrow:before{border-width:0 calc(var(--bs-popover-arrow-width)*.5) 
var(--bs-popover-arrow-height)}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow:before,.bs-popover-bottom>.popover-arrow:before{border-bottom-color:var(--bs-popover-arrow-border);top:0}.bs-popover-auto[data-popper-placement^=bottom]>.popover-arrow:after,.bs-popover-bottom>.popover-arrow:after{border-bottom-color:var(--bs-popover-bg);top:var(--bs-popover-border-width)}.bs-popover-auto[data-popper-placement^=bottom] .popover-header:before,.bs-popover-bottom .popover-header:before{border-bottom:var(--bs-popover-border-width) solid var(--bs-popover-header-bg);content:"";display:block;left:50%;margin-left:calc(var(--bs-popover-arrow-width)*-.5);position:absolute;top:0;width:var(--bs-popover-arrow-width)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow,.bs-popover-start>.popover-arrow{height:var(--bs-popover-arrow-width);right:calc((var(--bs-popover-arrow-height))*-1 - var(--bs-popover-border-width));width:var(--bs-popover-arrow-height)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow:after,.bs-popover-auto[data-popper-placement^=left]>.popover-arrow:before,.bs-popover-start>.popover-arrow:after,.bs-popover-start>.popover-arrow:before{border-width:calc(var(--bs-popover-arrow-width)*.5) 0 calc(var(--bs-popover-arrow-width)*.5) var(--bs-popover-arrow-height)}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow:before,.bs-popover-start>.popover-arrow:before{border-left-color:var(--bs-popover-arrow-border);right:0}.bs-popover-auto[data-popper-placement^=left]>.popover-arrow:after,.bs-popover-start>.popover-arrow:after{border-left-color:var(--bs-popover-bg);right:var(--bs-popover-border-width)}.popover-header{background-color:var(--bs-popover-header-bg);border-bottom:var(--bs-popover-border-width) solid var(--bs-popover-border-color);border-top-left-radius:var(--bs-popover-inner-border-radius);border-top-right-radius:var(--bs-popover-inner-border-radius);color:var(--bs-popover-header-color);font-size:var(--bs-popover-header-font-size);margin-bottom:0;padding:var(--bs-popover-header-padding-y) var(--bs-popover-header-padding-x)}.popover-header:empty{display:none}.popover-body{color:var(--bs-popover-body-color);padding:var(--bs-popover-body-padding-y) var(--bs-popover-body-padding-x)}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{overflow:hidden;position:relative;width:100%}.carousel-inner:after{clear:both;content:"";display:block}.carousel-item{backface-visibility:hidden;display:none;float:left;margin-right:-100%;position:relative;transition:transform .6s ease-in-out;width:100%}@media (prefers-reduced-motion:reduce){.carousel-item{transition:none}}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block}.active.carousel-item-end,.carousel-item-next:not(.carousel-item-start){transform:translateX(100%)}.active.carousel-item-start,.carousel-item-prev:not(.carousel-item-end){transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transform:none;transition-property:opacity}.carousel-fade .carousel-item-next.carousel-item-start,.carousel-fade .carousel-item-prev.carousel-item-end,.carousel-fade .carousel-item.active{opacity:1;z-index:1}.carousel-fade .active.carousel-item-end,.carousel-fade .active.carousel-item-start{opacity:0;transition:opacity 0s .6s;z-index:0}@media (prefers-reduced-motion:reduce){.carousel-fade .active.carousel-item-end,.carousel-fade 
.active.carousel-item-start{transition:none}}.carousel-control-next,.carousel-control-prev{align-items:center;background:none;border:0;bottom:0;color:#fff;display:flex;justify-content:center;opacity:.5;padding:0;position:absolute;text-align:center;top:0;transition:opacity .15s ease;width:15%;z-index:1}@media (prefers-reduced-motion:reduce){.carousel-control-next,.carousel-control-prev{transition:none}}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;opacity:.9;outline:0;text-decoration:none}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{background-position:50%;background-repeat:no-repeat;background-size:100% 100%;display:inline-block;height:2rem;width:2rem}.carousel-control-prev-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3E%3Cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3E%3C/svg%3E")}.carousel-control-next-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3E%3Cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3E%3C/svg%3E")}.carousel-indicators{bottom:0;display:flex;justify-content:center;left:0;list-style:none;margin-bottom:1rem;margin-left:15%;margin-right:15%;padding:0;position:absolute;right:0;z-index:2}.carousel-indicators [data-bs-target]{background-clip:padding-box;background-color:#fff;border:0;border-bottom:10px solid transparent;border-top:10px solid transparent;box-sizing:content-box;cursor:pointer;flex:0 1 auto;height:3px;margin-left:3px;margin-right:3px;opacity:.5;padding:0;text-indent:-999px;transition:opacity .6s ease;width:30px}@media (prefers-reduced-motion:reduce){.carousel-indicators [data-bs-target]{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{bottom:1.25rem;color:#fff;left:15%;padding-bottom:1.25rem;padding-top:1.25rem;position:absolute;right:15%;text-align:center}.carousel-dark .carousel-control-next-icon,.carousel-dark .carousel-control-prev-icon{filter:invert(1) grayscale(100)}.carousel-dark .carousel-indicators [data-bs-target]{background-color:#000}.carousel-dark .carousel-caption{color:#000}.spinner-border,.spinner-grow{animation:var(--bs-spinner-animation-speed) linear infinite var(--bs-spinner-animation-name);border-radius:50%;display:inline-block;height:var(--bs-spinner-height);vertical-align:var(--bs-spinner-vertical-align);width:var(--bs-spinner-width)}@keyframes spinner-border{to{transform:rotate(1turn)}}.spinner-border{--bs-spinner-width:2rem;--bs-spinner-height:2rem;--bs-spinner-vertical-align:-.125em;--bs-spinner-border-width:.25em;--bs-spinner-animation-speed:.75s;--bs-spinner-animation-name:spinner-border;border-right-color:currentcolor;border:var(--bs-spinner-border-width) solid;border-right:var(--bs-spinner-border-width) solid transparent}.spinner-border-sm{--bs-spinner-width:1rem;--bs-spinner-height:1rem;--bs-spinner-border-width:.2em}@keyframes 
spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}.spinner-grow{--bs-spinner-width:2rem;--bs-spinner-height:2rem;--bs-spinner-vertical-align:-.125em;--bs-spinner-animation-speed:.75s;--bs-spinner-animation-name:spinner-grow;background-color:currentcolor;opacity:0}.spinner-grow-sm{--bs-spinner-width:1rem;--bs-spinner-height:1rem}@media (prefers-reduced-motion:reduce){.spinner-border,.spinner-grow{--bs-spinner-animation-speed:1.5s}}.offcanvas,.offcanvas-lg,.offcanvas-md,.offcanvas-sm,.offcanvas-xl{--bs-offcanvas-zindex:1045;--bs-offcanvas-width:400px;--bs-offcanvas-height:30vh;--bs-offcanvas-padding-x:1rem;--bs-offcanvas-padding-y:1rem;--bs-offcanvas-color: ;--bs-offcanvas-bg:#fff;--bs-offcanvas-border-width:1px;--bs-offcanvas-border-color:var(--bs-border-color-translucent);--bs-offcanvas-box-shadow:0 0.125rem 0.25rem rgba(0,0,0,.075)}@media (max-width:539.98px){.offcanvas-sm{background-clip:padding-box;background-color:var(--bs-offcanvas-bg);bottom:0;color:var(--bs-offcanvas-color);display:flex;flex-direction:column;max-width:100%;outline:0;position:fixed;transition:transform .3s ease-in-out;visibility:hidden;z-index:var(--bs-offcanvas-zindex)}}@media (max-width:539.98px) and (prefers-reduced-motion:reduce){.offcanvas-sm{transition:none}}@media (max-width:539.98px){.offcanvas-sm.offcanvas-start{border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);left:0;top:0;transform:translateX(-100%);width:var(--bs-offcanvas-width)}.offcanvas-sm.offcanvas-end{border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);right:0;top:0;transform:translateX(100%);width:var(--bs-offcanvas-width)}.offcanvas-sm.offcanvas-top{border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);top:0;transform:translateY(-100%)}.offcanvas-sm.offcanvas-bottom,.offcanvas-sm.offcanvas-top{height:var(--bs-offcanvas-height);left:0;max-height:100%;right:0}.offcanvas-sm.offcanvas-bottom{border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-sm.show:not(.hiding),.offcanvas-sm.showing{transform:none}.offcanvas-sm.hiding,.offcanvas-sm.show,.offcanvas-sm.showing{visibility:visible}}@media (min-width:540px){.offcanvas-sm{--bs-offcanvas-height:auto;--bs-offcanvas-border-width:0;background-color:transparent!important}.offcanvas-sm .offcanvas-header{display:none}.offcanvas-sm .offcanvas-body{background-color:transparent!important;display:flex;flex-grow:0;overflow-y:visible;padding:0}}@media (max-width:719.98px){.offcanvas-md{background-clip:padding-box;background-color:var(--bs-offcanvas-bg);bottom:0;color:var(--bs-offcanvas-color);display:flex;flex-direction:column;max-width:100%;outline:0;position:fixed;transition:transform .3s ease-in-out;visibility:hidden;z-index:var(--bs-offcanvas-zindex)}}@media (max-width:719.98px) and (prefers-reduced-motion:reduce){.offcanvas-md{transition:none}}@media (max-width:719.98px){.offcanvas-md.offcanvas-start{border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);left:0;top:0;transform:translateX(-100%);width:var(--bs-offcanvas-width)}.offcanvas-md.offcanvas-end{border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);right:0;top:0;transform:translateX(100%);width:var(--bs-offcanvas-width)}.offcanvas-md.offcanvas-top{border-bottom:var(--bs-offcanvas-border-width) solid 
var(--bs-offcanvas-border-color);top:0;transform:translateY(-100%)}.offcanvas-md.offcanvas-bottom,.offcanvas-md.offcanvas-top{height:var(--bs-offcanvas-height);left:0;max-height:100%;right:0}.offcanvas-md.offcanvas-bottom{border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-md.show:not(.hiding),.offcanvas-md.showing{transform:none}.offcanvas-md.hiding,.offcanvas-md.show,.offcanvas-md.showing{visibility:visible}}@media (min-width:720px){.offcanvas-md{--bs-offcanvas-height:auto;--bs-offcanvas-border-width:0;background-color:transparent!important}.offcanvas-md .offcanvas-header{display:none}.offcanvas-md .offcanvas-body{background-color:transparent!important;display:flex;flex-grow:0;overflow-y:visible;padding:0}}@media (max-width:959.98px){.offcanvas-lg{background-clip:padding-box;background-color:var(--bs-offcanvas-bg);bottom:0;color:var(--bs-offcanvas-color);display:flex;flex-direction:column;max-width:100%;outline:0;position:fixed;transition:transform .3s ease-in-out;visibility:hidden;z-index:var(--bs-offcanvas-zindex)}}@media (max-width:959.98px) and (prefers-reduced-motion:reduce){.offcanvas-lg{transition:none}}@media (max-width:959.98px){.offcanvas-lg.offcanvas-start{border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);left:0;top:0;transform:translateX(-100%);width:var(--bs-offcanvas-width)}.offcanvas-lg.offcanvas-end{border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);right:0;top:0;transform:translateX(100%);width:var(--bs-offcanvas-width)}.offcanvas-lg.offcanvas-top{border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);top:0;transform:translateY(-100%)}.offcanvas-lg.offcanvas-bottom,.offcanvas-lg.offcanvas-top{height:var(--bs-offcanvas-height);left:0;max-height:100%;right:0}.offcanvas-lg.offcanvas-bottom{border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-lg.show:not(.hiding),.offcanvas-lg.showing{transform:none}.offcanvas-lg.hiding,.offcanvas-lg.show,.offcanvas-lg.showing{visibility:visible}}@media (min-width:960px){.offcanvas-lg{--bs-offcanvas-height:auto;--bs-offcanvas-border-width:0;background-color:transparent!important}.offcanvas-lg .offcanvas-header{display:none}.offcanvas-lg .offcanvas-body{background-color:transparent!important;display:flex;flex-grow:0;overflow-y:visible;padding:0}}@media (max-width:1199.98px){.offcanvas-xl{background-clip:padding-box;background-color:var(--bs-offcanvas-bg);bottom:0;color:var(--bs-offcanvas-color);display:flex;flex-direction:column;max-width:100%;outline:0;position:fixed;transition:transform .3s ease-in-out;visibility:hidden;z-index:var(--bs-offcanvas-zindex)}}@media (max-width:1199.98px) and (prefers-reduced-motion:reduce){.offcanvas-xl{transition:none}}@media (max-width:1199.98px){.offcanvas-xl.offcanvas-start{border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);left:0;top:0;transform:translateX(-100%);width:var(--bs-offcanvas-width)}.offcanvas-xl.offcanvas-end{border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);right:0;top:0;transform:translateX(100%);width:var(--bs-offcanvas-width)}.offcanvas-xl.offcanvas-top{border-bottom:var(--bs-offcanvas-border-width) solid 
var(--bs-offcanvas-border-color);top:0;transform:translateY(-100%)}.offcanvas-xl.offcanvas-bottom,.offcanvas-xl.offcanvas-top{height:var(--bs-offcanvas-height);left:0;max-height:100%;right:0}.offcanvas-xl.offcanvas-bottom{border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas-xl.show:not(.hiding),.offcanvas-xl.showing{transform:none}.offcanvas-xl.hiding,.offcanvas-xl.show,.offcanvas-xl.showing{visibility:visible}}@media (min-width:1200px){.offcanvas-xl{--bs-offcanvas-height:auto;--bs-offcanvas-border-width:0;background-color:transparent!important}.offcanvas-xl .offcanvas-header{display:none}.offcanvas-xl .offcanvas-body{background-color:transparent!important;display:flex;flex-grow:0;overflow-y:visible;padding:0}}.offcanvas{background-clip:padding-box;background-color:var(--bs-offcanvas-bg);bottom:0;color:var(--bs-offcanvas-color);display:flex;flex-direction:column;max-width:100%;outline:0;position:fixed;transition:transform .3s ease-in-out;visibility:hidden;z-index:var(--bs-offcanvas-zindex)}@media (prefers-reduced-motion:reduce){.offcanvas{transition:none}}.offcanvas.offcanvas-start{border-right:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);left:0;top:0;transform:translateX(-100%);width:var(--bs-offcanvas-width)}.offcanvas.offcanvas-end{border-left:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);right:0;top:0;transform:translateX(100%);width:var(--bs-offcanvas-width)}.offcanvas.offcanvas-top{border-bottom:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);top:0;transform:translateY(-100%)}.offcanvas.offcanvas-bottom,.offcanvas.offcanvas-top{height:var(--bs-offcanvas-height);left:0;max-height:100%;right:0}.offcanvas.offcanvas-bottom{border-top:var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color);transform:translateY(100%)}.offcanvas.show:not(.hiding),.offcanvas.showing{transform:none}.offcanvas.hiding,.offcanvas.show,.offcanvas.showing{visibility:visible}.offcanvas-backdrop{background-color:#000;height:100vh;left:0;position:fixed;top:0;width:100vw;z-index:1040}.offcanvas-backdrop.fade{opacity:0}.offcanvas-backdrop.show{opacity:.5}.offcanvas-header{align-items:center;display:flex;justify-content:space-between;padding:var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x)}.offcanvas-header .btn-close{margin-bottom:calc(var(--bs-offcanvas-padding-y)*-.5);margin-right:calc(var(--bs-offcanvas-padding-x)*-.5);margin-top:calc(var(--bs-offcanvas-padding-y)*-.5);padding:calc(var(--bs-offcanvas-padding-y)*.5) calc(var(--bs-offcanvas-padding-x)*.5)}.offcanvas-title{line-height:1.5;margin-bottom:0}.offcanvas-body{flex-grow:1;overflow-y:auto;padding:var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x)}.placeholder{background-color:currentcolor;cursor:wait;display:inline-block;min-height:1em;opacity:.5;vertical-align:middle}.placeholder.btn:before{content:"";display:inline-block}.placeholder-xs{min-height:.6em}.placeholder-sm{min-height:.8em}.placeholder-lg{min-height:1.2em}.placeholder-glow .placeholder{animation:placeholder-glow 2s ease-in-out infinite}@keyframes placeholder-glow{50%{opacity:.2}}.placeholder-wave{animation:placeholder-wave 2s linear infinite;mask-image:linear-gradient(130deg,#000 55%,rgba(0,0,0,.8) 75%,#000 95%);mask-size:200% 100%}@keyframes placeholder-wave{to{mask-position:-200% 
0}}.clearfix:after{clear:both;content:"";display:block}.text-bg-primary{background-color:RGBA(13,110,253,var(--bs-bg-opacity,1))!important;color:#fff!important}.text-bg-secondary{background-color:RGBA(108,117,125,var(--bs-bg-opacity,1))!important;color:#fff!important}.text-bg-success{background-color:RGBA(25,135,84,var(--bs-bg-opacity,1))!important;color:#fff!important}.text-bg-info{background-color:RGBA(13,202,240,var(--bs-bg-opacity,1))!important;color:#000!important}.text-bg-warning{background-color:RGBA(255,193,7,var(--bs-bg-opacity,1))!important;color:#000!important}.text-bg-danger{background-color:RGBA(220,53,69,var(--bs-bg-opacity,1))!important;color:#fff!important}.text-bg-light{background-color:RGBA(248,249,250,var(--bs-bg-opacity,1))!important;color:#000!important}.text-bg-dark{background-color:RGBA(33,37,41,var(--bs-bg-opacity,1))!important;color:#fff!important}.link-primary{color:#0d6efd!important}.link-primary:focus,.link-primary:hover{color:#0a58ca!important}.link-secondary{color:#6c757d!important}.link-secondary:focus,.link-secondary:hover{color:#565e64!important}.link-success{color:#198754!important}.link-success:focus,.link-success:hover{color:#146c43!important}.link-info{color:#0dcaf0!important}.link-info:focus,.link-info:hover{color:#3dd5f3!important}.link-warning{color:#ffc107!important}.link-warning:focus,.link-warning:hover{color:#ffcd39!important}.link-danger{color:#dc3545!important}.link-danger:focus,.link-danger:hover{color:#b02a37!important}.link-light{color:#f8f9fa!important}.link-light:focus,.link-light:hover{color:#f9fafb!important}.link-dark{color:#212529!important}.link-dark:focus,.link-dark:hover{color:#1a1e21!important}.ratio{position:relative;width:100%}.ratio:before{content:"";display:block;padding-top:var(--bs-aspect-ratio)}.ratio>*{height:100%;left:0;position:absolute;top:0;width:100%}.ratio-1x1{--bs-aspect-ratio:100%}.ratio-4x3{--bs-aspect-ratio:75%}.ratio-16x9{--bs-aspect-ratio:56.25%}.ratio-21x9{--bs-aspect-ratio:42.85714%}.fixed-top{top:0}.fixed-bottom,.fixed-top{left:0;position:fixed;right:0;z-index:1030}.fixed-bottom{bottom:0}.sticky-top{top:0}.sticky-bottom,.sticky-top{position:sticky;z-index:1020}.sticky-bottom{bottom:0}@media (min-width:540px){.sticky-sm-top{position:sticky;top:0;z-index:1020}.sticky-sm-bottom{bottom:0;position:sticky;z-index:1020}}@media (min-width:720px){.sticky-md-top{position:sticky;top:0;z-index:1020}.sticky-md-bottom{bottom:0;position:sticky;z-index:1020}}@media (min-width:960px){.sticky-lg-top{position:sticky;top:0;z-index:1020}.sticky-lg-bottom{bottom:0;position:sticky;z-index:1020}}@media (min-width:1200px){.sticky-xl-top{position:sticky;top:0;z-index:1020}.sticky-xl-bottom{bottom:0;position:sticky;z-index:1020}}.hstack{align-items:center;flex-direction:row}.hstack,.vstack{align-self:stretch;display:flex}.vstack{flex:1 1 
auto;flex-direction:column}.visually-hidden,.visually-hidden-focusable:not(:focus):not(:focus-within){clip:rect(0,0,0,0)!important;border:0!important;height:1px!important;margin:-1px!important;overflow:hidden!important;padding:0!important;position:absolute!important;white-space:nowrap!important;width:1px!important}.stretched-link:after{bottom:0;content:"";left:0;position:absolute;right:0;top:0;z-index:1}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.vr{align-self:stretch;background-color:currentcolor;display:inline-block;min-height:1em;opacity:.25;width:1px}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.float-start{float:left!important}.float-end{float:right!important}.float-none{float:none!important}.opacity-0{opacity:0!important}.opacity-25{opacity:.25!important}.opacity-50{opacity:.5!important}.opacity-75{opacity:.75!important}.opacity-100{opacity:1!important}.overflow-auto{overflow:auto!important}.overflow-hidden{overflow:hidden!important}.overflow-visible{overflow:visible!important}.overflow-scroll{overflow:scroll!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-grid{display:grid!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:flex!important}.d-inline-flex{display:inline-flex!important}.d-none{display:none!important}.shadow{box-shadow:0 .5rem 1rem rgba(0,0,0,.15)!important}.shadow-sm{box-shadow:0 .125rem .25rem rgba(0,0,0,.075)!important}.shadow-lg{box-shadow:0 1rem 3rem rgba(0,0,0,.175)!important}.shadow-none{box-shadow:none!important}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:sticky!important}.top-0{top:0!important}.top-50{top:50%!important}.top-100{top:100%!important}.bottom-0{bottom:0!important}.bottom-50{bottom:50%!important}.bottom-100{bottom:100%!important}.start-0{left:0!important}.start-50{left:50%!important}.start-100{left:100%!important}.end-0{right:0!important}.end-50{right:50%!important}.end-100{right:100%!important}.translate-middle{transform:translate(-50%,-50%)!important}.translate-middle-x{transform:translateX(-50%)!important}.translate-middle-y{transform:translateY(-50%)!important}.border{border:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color)!important}.border-0{border:0!important}.border-top{border-top:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color)!important}.border-top-0{border-top:0!important}.border-end{border-right:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color)!important}.border-end-0{border-right:0!important}.border-bottom{border-bottom:var(--bs-border-width) var(--bs-border-style) var(--bs-border-color)!important}.border-bottom-0{border-bottom:0!important}.border-start{border-left:var(--bs-border-width) var(--bs-border-style) 
var(--bs-border-color)!important}.border-start-0{border-left:0!important}.border-primary{--bs-border-opacity:1;border-color:rgba(var(--bs-primary-rgb),var(--bs-border-opacity))!important}.border-secondary{--bs-border-opacity:1;border-color:rgba(var(--bs-secondary-rgb),var(--bs-border-opacity))!important}.border-success{--bs-border-opacity:1;border-color:rgba(var(--bs-success-rgb),var(--bs-border-opacity))!important}.border-info{--bs-border-opacity:1;border-color:rgba(var(--bs-info-rgb),var(--bs-border-opacity))!important}.border-warning{--bs-border-opacity:1;border-color:rgba(var(--bs-warning-rgb),var(--bs-border-opacity))!important}.border-danger{--bs-border-opacity:1;border-color:rgba(var(--bs-danger-rgb),var(--bs-border-opacity))!important}.border-light{--bs-border-opacity:1;border-color:rgba(var(--bs-light-rgb),var(--bs-border-opacity))!important}.border-dark{--bs-border-opacity:1;border-color:rgba(var(--bs-dark-rgb),var(--bs-border-opacity))!important}.border-white{--bs-border-opacity:1;border-color:rgba(var(--bs-white-rgb),var(--bs-border-opacity))!important}.border-1{--bs-border-width:1px}.border-2{--bs-border-width:2px}.border-3{--bs-border-width:3px}.border-4{--bs-border-width:4px}.border-5{--bs-border-width:5px}.border-opacity-10{--bs-border-opacity:.1}.border-opacity-25{--bs-border-opacity:.25}.border-opacity-50{--bs-border-opacity:.5}.border-opacity-75{--bs-border-opacity:.75}.border-opacity-100{--bs-border-opacity:1}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.mw-100{max-width:100%!important}.vw-100{width:100vw!important}.min-vw-100{min-width:100vw!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mh-100{max-height:100%!important}.vh-100{height:100vh!important}.min-vh-100{min-height:100vh!important}.flex-fill{flex:1 1 
auto!important}.flex-row{flex-direction:row!important}.flex-column{flex-direction:column!important}.flex-row-reverse{flex-direction:row-reverse!important}.flex-column-reverse{flex-direction:column-reverse!important}.flex-grow-0{flex-grow:0!important}.flex-grow-1{flex-grow:1!important}.flex-shrink-0{flex-shrink:0!important}.flex-shrink-1{flex-shrink:1!important}.flex-wrap{flex-wrap:wrap!important}.flex-nowrap{flex-wrap:nowrap!important}.flex-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.justify-content-evenly{justify-content:space-evenly!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}.order-first{order:-1!important}.order-0{order:0!important}.order-1{order:1!important}.order-2{order:2!important}.order-3{order:3!important}.order-4{order:4!important}.order-5{order:5!important}.order-last{order:6!important}.m-0{margin:0!important}.m-1{margin:.25rem!important}.m-2{margin:.5rem!important}.m-3{margin:1rem!important}.m-4{margin:1.5rem!important}.m-5{margin:3rem!important}.m-auto{margin:auto!important}.mx-0{margin-left:0!important;margin-right:0!important}.mx-1{margin-left:.25rem!important;margin-right:.25rem!important}.mx-2{margin-left:.5rem!important;margin-right:.5rem!important}.mx-3{margin-left:1rem!important;margin-right:1rem!important}.mx-4{margin-left:1.5rem!important;margin-right:1.5rem!important}.mx-5{margin-left:3rem!important;margin-right:3rem!important}.mx-auto{margin-left:auto!important;margin-right:auto!important}.my-0{margin-bottom:0!important;margin-top:0!important}.my-1{margin-bottom:.25rem!important;margin-top:.25rem!important}.my-2{margin-bottom:.5rem!important;margin-top:.5rem!important}.my-3{margin-bottom:1rem!important;margin-top:1rem!important}.my-4{margin-bottom:1.5rem!important;margin-top:1.5rem!important}.my-5{margin-bottom:3rem!important;margin-top:3rem!important}.my-auto{margin-bottom:auto!important;margin-top:auto!important}.mt-0{margin-top:0!important}.mt-1{margin-top:.25rem!important}.mt-2{margin-top:.5rem!important}.mt-3{margin-top:1rem!important}.mt-4{margin-top:1.5rem!important}.mt-5{margin-top:3rem!important}.mt-auto{margin-top:auto!important}.me-0{margin-right:0!important}.me-1{margin-right:.25rem!important}.me-2{margin-right:.5rem!important}.me-3{margin-right:1rem!important}.me-4{margin-right:1.5rem!important}.me-5{margin-right:3rem!important}.me-auto{margin-right:auto!important}.mb-0{margin-bottom:0!important}.mb-1{margin-bottom:.25rem!important}
.mb-2{margin-bottom:.5rem!important}.mb-3{margin-bottom:1rem!important}.mb-4{margin-bottom:1.5rem!important}.mb-5{margin-bottom:3rem!important}.mb-auto{margin-bottom:auto!important}.ms-0{margin-left:0!important}.ms-1{margin-left:.25rem!important}.ms-2{margin-left:.5rem!important}.ms-3{margin-left:1rem!important}.ms-4{margin-left:1.5rem!important}.ms-5{margin-left:3rem!important}.ms-auto{margin-left:auto!important}.p-0{padding:0!important}.p-1{padding:.25rem!important}.p-2{padding:.5rem!important}.p-3{padding:1rem!important}.p-4{padding:1.5rem!important}.p-5{padding:3rem!important}.px-0{padding-left:0!important;padding-right:0!important}.px-1{padding-left:.25rem!important;padding-right:.25rem!important}.px-2{padding-left:.5rem!important;padding-right:.5rem!important}.px-3{padding-left:1rem!important;padding-right:1rem!important}.px-4{padding-left:1.5rem!important;padding-right:1.5rem!important}.px-5{padding-left:3rem!important;padding-right:3rem!important}.py-0{padding-bottom:0!important;padding-top:0!important}.py-1{padding-bottom:.25rem!important;padding-top:.25rem!important}.py-2{padding-bottom:.5rem!important;padding-top:.5rem!important}.py-3{padding-bottom:1rem!important;padding-top:1rem!important}.py-4{padding-bottom:1.5rem!important;padding-top:1.5rem!important}.py-5{padding-bottom:3rem!important;padding-top:3rem!important}.pt-0{padding-top:0!important}.pt-1{padding-top:.25rem!important}.pt-2{padding-top:.5rem!important}.pt-3{padding-top:1rem!important}.pt-4{padding-top:1.5rem!important}.pt-5{padding-top:3rem!important}.pe-0{padding-right:0!important}.pe-1{padding-right:.25rem!important}.pe-2{padding-right:.5rem!important}.pe-3{padding-right:1rem!important}.pe-4{padding-right:1.5rem!important}.pe-5{padding-right:3rem!important}.pb-0{padding-bottom:0!important}.pb-1{padding-bottom:.25rem!important}.pb-2{padding-bottom:.5rem!important}.pb-3{padding-bottom:1rem!important}.pb-4{padding-bottom:1.5rem!important}.pb-5{padding-bottom:3rem!important}.ps-0{padding-left:0!important}.ps-1{padding-left:.25rem!important}.ps-2{padding-left:.5rem!important}.ps-3{padding-left:1rem!important}.ps-4{padding-left:1.5rem!important}.ps-5{padding-left:3rem!important}.gap-0{gap:0!important}.gap-1{gap:.25rem!important}.gap-2{gap:.5rem!important}.gap-3{gap:1rem!important}.gap-4{gap:1.5rem!important}.gap-5{gap:3rem!important}.font-monospace{font-family:var(--bs-font-monospace)!important}.fs-1{font-size:calc(1.375rem + 1.5vw)!important}.fs-2{font-size:calc(1.325rem + .9vw)!important}.fs-3{font-size:calc(1.3rem + .6vw)!important}.fs-4{font-size:calc(1.275rem + 
.3vw)!important}.fs-5{font-size:1.25rem!important}.fs-6{font-size:1rem!important}.fst-italic{font-style:italic!important}.fst-normal{font-style:normal!important}.fw-light{font-weight:300!important}.fw-lighter{font-weight:lighter!important}.fw-normal{font-weight:400!important}.fw-bold{font-weight:700!important}.fw-semibold{font-weight:600!important}.fw-bolder{font-weight:bolder!important}.lh-1{line-height:1!important}.lh-sm{line-height:1.25!important}.lh-base{line-height:1.5!important}.lh-lg{line-height:2!important}.text-start{text-align:left!important}.text-end{text-align:right!important}.text-center{text-align:center!important}.text-decoration-none{text-decoration:none!important}.text-decoration-underline{text-decoration:underline!important}.text-decoration-line-through{text-decoration:line-through!important}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.text-wrap{white-space:normal!important}.text-nowrap{white-space:nowrap!important}.text-break{word-wrap:break-word!important;word-break:break-word!important}.text-primary{--bs-text-opacity:1;color:rgba(var(--bs-primary-rgb),var(--bs-text-opacity))!important}.text-secondary{--bs-text-opacity:1;color:rgba(var(--bs-secondary-rgb),var(--bs-text-opacity))!important}.text-success{--bs-text-opacity:1;color:rgba(var(--bs-success-rgb),var(--bs-text-opacity))!important}.text-info{--bs-text-opacity:1;color:rgba(var(--bs-info-rgb),var(--bs-text-opacity))!important}.text-warning{--bs-text-opacity:1;color:rgba(var(--bs-warning-rgb),var(--bs-text-opacity))!important}.text-danger{--bs-text-opacity:1;color:rgba(var(--bs-danger-rgb),var(--bs-text-opacity))!important}.text-light{--bs-text-opacity:1;color:rgba(var(--bs-light-rgb),var(--bs-text-opacity))!important}.text-dark{--bs-text-opacity:1;color:rgba(var(--bs-dark-rgb),var(--bs-text-opacity))!important}.text-black{--bs-text-opacity:1;color:rgba(var(--bs-black-rgb),var(--bs-text-opacity))!important}.text-white{--bs-text-opacity:1;color:rgba(var(--bs-white-rgb),var(--bs-text-opacity))!important}.text-body{--bs-text-opacity:1;color:rgba(var(--bs-body-color-rgb),var(--bs-text-opacity))!important}.text-muted{--bs-text-opacity:1;color:#6c757d!important}.text-black-50{--bs-text-opacity:1;color:rgba(0,0,0,.5)!important}.text-white-50{--bs-text-opacity:1;color:hsla(0,0%,100%,.5)!important}.text-reset{--bs-text-opacity:1;color:inherit!important}.text-opacity-25{--bs-text-opacity:.25}.text-opacity-50{--bs-text-opacity:.5}.text-opacity-75{--bs-text-opacity:.75}.text-opacity-100{--bs-text-opacity:1}.bg-primary{--bs-bg-opacity:1;background-color:rgba(var(--bs-primary-rgb),var(--bs-bg-opacity))!important}.bg-secondary{--bs-bg-opacity:1;background-color:rgba(var(--bs-secondary-rgb),var(--bs-bg-opacity))!important}.bg-success{--bs-bg-opacity:1;background-color:rgba(var(--bs-success-rgb),var(--bs-bg-opacity))!important}.bg-info{--bs-bg-opacity:1;background-color:rgba(var(--bs-info-rgb),var(--bs-bg-opacity))!important}.bg-warning{--bs-bg-opacity:1;background-color:rgba(var(--bs-warning-rgb),var(--bs-bg-opacity))!important}.bg-danger{--bs-bg-opacity:1;background-color:rgba(var(--bs-danger-rgb),var(--bs-bg-opacity))!important}.bg-light{--bs-bg-opacity:1;background-color:rgba(var(--bs-light-rgb),var(--bs-bg-opacity))!important}.bg-dark{--bs-bg-opacity:1;background-color:rgba(var(--bs-dark-rgb),var(--bs-bg-opacity))!important}.bg-black{--bs-bg-opacity:1;background-color:rgba(var(--bs-black-rgb),var(--bs-bg-opacity))!important}
.bg-white{--bs-bg-opacity:1;background-color:rgba(var(--bs-white-rgb),var(--bs-bg-opacity))!important}.bg-body{--bs-bg-opacity:1;background-color:rgba(var(--bs-body-bg-rgb),var(--bs-bg-opacity))!important}.bg-transparent{--bs-bg-opacity:1;background-color:transparent!important}.bg-opacity-10{--bs-bg-opacity:.1}.bg-opacity-25{--bs-bg-opacity:.25}.bg-opacity-50{--bs-bg-opacity:.5}.bg-opacity-75{--bs-bg-opacity:.75}.bg-opacity-100{--bs-bg-opacity:1}.bg-gradient{background-image:var(--bs-gradient)!important}.user-select-all{user-select:all!important}.user-select-auto{user-select:auto!important}.user-select-none{user-select:none!important}.pe-none{pointer-events:none!important}.pe-auto{pointer-events:auto!important}.rounded{border-radius:var(--bs-border-radius)!important}.rounded-0{border-radius:0!important}.rounded-1{border-radius:var(--bs-border-radius-sm)!important}.rounded-2{border-radius:var(--bs-border-radius)!important}.rounded-3{border-radius:var(--bs-border-radius-lg)!important}.rounded-4{border-radius:var(--bs-border-radius-xl)!important}.rounded-5{border-radius:var(--bs-border-radius-2xl)!important}.rounded-circle{border-radius:50%!important}.rounded-pill{border-radius:var(--bs-border-radius-pill)!important}.rounded-top{border-top-left-radius:var(--bs-border-radius)!important}.rounded-end,.rounded-top{border-top-right-radius:var(--bs-border-radius)!important}.rounded-bottom,.rounded-end{border-bottom-right-radius:var(--bs-border-radius)!important}.rounded-bottom,.rounded-start{border-bottom-left-radius:var(--bs-border-radius)!important}.rounded-start{border-top-left-radius:var(--bs-border-radius)!important}.visible{visibility:visible!important}.invisible{visibility:hidden!important}@media (min-width:540px){.float-sm-start{float:left!important}.float-sm-end{float:right!important}.float-sm-none{float:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-grid{display:grid!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:flex!important}.d-sm-inline-flex{display:inline-flex!important}.d-sm-none{display:none!important}.flex-sm-fill{flex:1 1 
auto!important}.flex-sm-row{flex-direction:row!important}.flex-sm-column{flex-direction:column!important}.flex-sm-row-reverse{flex-direction:row-reverse!important}.flex-sm-column-reverse{flex-direction:column-reverse!important}.flex-sm-grow-0{flex-grow:0!important}.flex-sm-grow-1{flex-grow:1!important}.flex-sm-shrink-0{flex-shrink:0!important}.flex-sm-shrink-1{flex-shrink:1!important}.flex-sm-wrap{flex-wrap:wrap!important}.flex-sm-nowrap{flex-wrap:nowrap!important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.justify-content-sm-evenly{justify-content:space-evenly!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!important}.order-sm-first{order:-1!important}.order-sm-0{order:0!important}.order-sm-1{order:1!important}.order-sm-2{order:2!important}.order-sm-3{order:3!important}.order-sm-4{order:4!important}.order-sm-5{order:5!important}.order-sm-last{order:6!important}.m-sm-0{margin:0!important}.m-sm-1{margin:.25rem!important}.m-sm-2{margin:.5rem!important}.m-sm-3{margin:1rem!important}.m-sm-4{margin:1.5rem!important}.m-sm-5{margin:3rem!important}.m-sm-auto{margin:auto!important}.mx-sm-0{margin-left:0!important;margin-right:0!important}.mx-sm-1{margin-left:.25rem!important;margin-right:.25rem!important}.mx-sm-2{margin-left:.5rem!important;margin-right:.5rem!important}.mx-sm-3{margin-left:1rem!important;margin-right:1rem!important}.mx-sm-4{margin-left:1.5rem!important;margin-right:1.5rem!important}.mx-sm-5{margin-left:3rem!important;margin-right:3rem!important}.mx-sm-auto{margin-left:auto!important;margin-right:auto!important}.my-sm-0{margin-bottom:0!important;margin-top:0!important}.my-sm-1{margin-bottom:.25rem!important;margin-top:.25rem!important}.my-sm-2{margin-bottom:.5rem!important;margin-top:.5rem!important}.my-sm-3{margin-bottom:1rem!important;margin-top:1rem!important}.my-sm-4{margin-bottom:1.5rem!important;margin-top:1.5rem!important}.my-sm-5{margin-bottom:3rem!important;margin-top:3rem!important}.my-sm-auto{margin-bottom:auto!important;margin-top:auto!important}.mt-sm-0{margin-top:0!important}.mt-sm-1{margin-top:.25rem!important}.mt-sm-2{margin-top:.5rem!important}.mt-sm-3{margin-top:1rem!important}.mt-sm-4{margin-top:1.5rem!important}.mt-sm-5{margin-top:3rem!important}.mt-sm-auto{margin-top:auto!important}.me-sm-0{margin-right:0!important}.me-sm-1{margin-right:.25rem!important}.me-sm-2{margin-right:.5rem
!important}.me-sm-3{margin-right:1rem!important}.me-sm-4{margin-right:1.5rem!important}.me-sm-5{margin-right:3rem!important}.me-sm-auto{margin-right:auto!important}.mb-sm-0{margin-bottom:0!important}.mb-sm-1{margin-bottom:.25rem!important}.mb-sm-2{margin-bottom:.5rem!important}.mb-sm-3{margin-bottom:1rem!important}.mb-sm-4{margin-bottom:1.5rem!important}.mb-sm-5{margin-bottom:3rem!important}.mb-sm-auto{margin-bottom:auto!important}.ms-sm-0{margin-left:0!important}.ms-sm-1{margin-left:.25rem!important}.ms-sm-2{margin-left:.5rem!important}.ms-sm-3{margin-left:1rem!important}.ms-sm-4{margin-left:1.5rem!important}.ms-sm-5{margin-left:3rem!important}.ms-sm-auto{margin-left:auto!important}.p-sm-0{padding:0!important}.p-sm-1{padding:.25rem!important}.p-sm-2{padding:.5rem!important}.p-sm-3{padding:1rem!important}.p-sm-4{padding:1.5rem!important}.p-sm-5{padding:3rem!important}.px-sm-0{padding-left:0!important;padding-right:0!important}.px-sm-1{padding-left:.25rem!important;padding-right:.25rem!important}.px-sm-2{padding-left:.5rem!important;padding-right:.5rem!important}.px-sm-3{padding-left:1rem!important;padding-right:1rem!important}.px-sm-4{padding-left:1.5rem!important;padding-right:1.5rem!important}.px-sm-5{padding-left:3rem!important;padding-right:3rem!important}.py-sm-0{padding-bottom:0!important;padding-top:0!important}.py-sm-1{padding-bottom:.25rem!important;padding-top:.25rem!important}.py-sm-2{padding-bottom:.5rem!important;padding-top:.5rem!important}.py-sm-3{padding-bottom:1rem!important;padding-top:1rem!important}.py-sm-4{padding-bottom:1.5rem!important;padding-top:1.5rem!important}.py-sm-5{padding-bottom:3rem!important;padding-top:3rem!important}.pt-sm-0{padding-top:0!important}.pt-sm-1{padding-top:.25rem!important}.pt-sm-2{padding-top:.5rem!important}.pt-sm-3{padding-top:1rem!important}.pt-sm-4{padding-top:1.5rem!important}.pt-sm-5{padding-top:3rem!important}.pe-sm-0{padding-right:0!important}.pe-sm-1{padding-right:.25rem!important}.pe-sm-2{padding-right:.5rem!important}.pe-sm-3{padding-right:1rem!important}.pe-sm-4{padding-right:1.5rem!important}.pe-sm-5{padding-right:3rem!important}.pb-sm-0{padding-bottom:0!important}.pb-sm-1{padding-bottom:.25rem!important}.pb-sm-2{padding-bottom:.5rem!important}.pb-sm-3{padding-bottom:1rem!important}.pb-sm-4{padding-bottom:1.5rem!important}.pb-sm-5{padding-bottom:3rem!important}.ps-sm-0{padding-left:0!important}.ps-sm-1{padding-left:.25rem!important}.ps-sm-2{padding-left:.5rem!important}.ps-sm-3{padding-left:1rem!important}.ps-sm-4{padding-left:1.5rem!important}.ps-sm-5{padding-left:3rem!important}.gap-sm-0{gap:0!important}.gap-sm-1{gap:.25rem!important}.gap-sm-2{gap:.5rem!important}.gap-sm-3{gap:1rem!important}.gap-sm-4{gap:1.5rem!important}.gap-sm-5{gap:3rem!important}.text-sm-start{text-align:left!important}.text-sm-end{text-align:right!important}.text-sm-center{text-align:center!important}}@media (min-width:720px){.float-md-start{float:left!important}.float-md-end{float:right!important}.float-md-none{float:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-grid{display:grid!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:flex!important}.d-md-inline-flex{display:inline-flex!important}.d-md-none{display:none!important}.flex-md-fill{flex:1 1 
auto!important}.flex-md-row{flex-direction:row!important}.flex-md-column{flex-direction:column!important}.flex-md-row-reverse{flex-direction:row-reverse!important}.flex-md-column-reverse{flex-direction:column-reverse!important}.flex-md-grow-0{flex-grow:0!important}.flex-md-grow-1{flex-grow:1!important}.flex-md-shrink-0{flex-shrink:0!important}.flex-md-shrink-1{flex-shrink:1!important}.flex-md-wrap{flex-wrap:wrap!important}.flex-md-nowrap{flex-wrap:nowrap!important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.justify-content-md-evenly{justify-content:space-evenly!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!important}.order-md-first{order:-1!important}.order-md-0{order:0!important}.order-md-1{order:1!important}.order-md-2{order:2!important}.order-md-3{order:3!important}.order-md-4{order:4!important}.order-md-5{order:5!important}.order-md-last{order:6!important}.m-md-0{margin:0!important}.m-md-1{margin:.25rem!important}.m-md-2{margin:.5rem!important}.m-md-3{margin:1rem!important}.m-md-4{margin:1.5rem!important}.m-md-5{margin:3rem!important}.m-md-auto{margin:auto!important}.mx-md-0{margin-left:0!important;margin-right:0!important}.mx-md-1{margin-left:.25rem!important;margin-right:.25rem!important}.mx-md-2{margin-left:.5rem!important;margin-right:.5rem!important}.mx-md-3{margin-left:1rem!important;margin-right:1rem!important}.mx-md-4{margin-left:1.5rem!important;margin-right:1.5rem!important}.mx-md-5{margin-left:3rem!important;margin-right:3rem!important}.mx-md-auto{margin-left:auto!important;margin-right:auto!important}.my-md-0{margin-bottom:0!important;margin-top:0!important}.my-md-1{margin-bottom:.25rem!important;margin-top:.25rem!important}.my-md-2{margin-bottom:.5rem!important;margin-top:.5rem!important}.my-md-3{margin-bottom:1rem!important;margin-top:1rem!important}.my-md-4{margin-bottom:1.5rem!important;margin-top:1.5rem!important}.my-md-5{margin-bottom:3rem!important;margin-top:3rem!important}.my-md-auto{margin-bottom:auto!important;margin-top:auto!important}.mt-md-0{margin-top:0!important}.mt-md-1{margin-top:.25rem!important}.mt-md-2{margin-top:.5rem!important}.mt-md-3{margin-top:1rem!important}.mt-md-4{margin-top:1.5rem!important}.mt-md-5{margin-top:3rem!important}.mt-md-auto{margin-top:auto!important}.me-md-0{margin-right:0!important}.me-md-1{margin-right:.25rem!important}.me-md-2{margin-right:.5rem
!important}.me-md-3{margin-right:1rem!important}.me-md-4{margin-right:1.5rem!important}.me-md-5{margin-right:3rem!important}.me-md-auto{margin-right:auto!important}.mb-md-0{margin-bottom:0!important}.mb-md-1{margin-bottom:.25rem!important}.mb-md-2{margin-bottom:.5rem!important}.mb-md-3{margin-bottom:1rem!important}.mb-md-4{margin-bottom:1.5rem!important}.mb-md-5{margin-bottom:3rem!important}.mb-md-auto{margin-bottom:auto!important}.ms-md-0{margin-left:0!important}.ms-md-1{margin-left:.25rem!important}.ms-md-2{margin-left:.5rem!important}.ms-md-3{margin-left:1rem!important}.ms-md-4{margin-left:1.5rem!important}.ms-md-5{margin-left:3rem!important}.ms-md-auto{margin-left:auto!important}.p-md-0{padding:0!important}.p-md-1{padding:.25rem!important}.p-md-2{padding:.5rem!important}.p-md-3{padding:1rem!important}.p-md-4{padding:1.5rem!important}.p-md-5{padding:3rem!important}.px-md-0{padding-left:0!important;padding-right:0!important}.px-md-1{padding-left:.25rem!important;padding-right:.25rem!important}.px-md-2{padding-left:.5rem!important;padding-right:.5rem!important}.px-md-3{padding-left:1rem!important;padding-right:1rem!important}.px-md-4{padding-left:1.5rem!important;padding-right:1.5rem!important}.px-md-5{padding-left:3rem!important;padding-right:3rem!important}.py-md-0{padding-bottom:0!important;padding-top:0!important}.py-md-1{padding-bottom:.25rem!important;padding-top:.25rem!important}.py-md-2{padding-bottom:.5rem!important;padding-top:.5rem!important}.py-md-3{padding-bottom:1rem!important;padding-top:1rem!important}.py-md-4{padding-bottom:1.5rem!important;padding-top:1.5rem!important}.py-md-5{padding-bottom:3rem!important;padding-top:3rem!important}.pt-md-0{padding-top:0!important}.pt-md-1{padding-top:.25rem!important}.pt-md-2{padding-top:.5rem!important}.pt-md-3{padding-top:1rem!important}.pt-md-4{padding-top:1.5rem!important}.pt-md-5{padding-top:3rem!important}.pe-md-0{padding-right:0!important}.pe-md-1{padding-right:.25rem!important}.pe-md-2{padding-right:.5rem!important}.pe-md-3{padding-right:1rem!important}.pe-md-4{padding-right:1.5rem!important}.pe-md-5{padding-right:3rem!important}.pb-md-0{padding-bottom:0!important}.pb-md-1{padding-bottom:.25rem!important}.pb-md-2{padding-bottom:.5rem!important}.pb-md-3{padding-bottom:1rem!important}.pb-md-4{padding-bottom:1.5rem!important}.pb-md-5{padding-bottom:3rem!important}.ps-md-0{padding-left:0!important}.ps-md-1{padding-left:.25rem!important}.ps-md-2{padding-left:.5rem!important}.ps-md-3{padding-left:1rem!important}.ps-md-4{padding-left:1.5rem!important}.ps-md-5{padding-left:3rem!important}.gap-md-0{gap:0!important}.gap-md-1{gap:.25rem!important}.gap-md-2{gap:.5rem!important}.gap-md-3{gap:1rem!important}.gap-md-4{gap:1.5rem!important}.gap-md-5{gap:3rem!important}.text-md-start{text-align:left!important}.text-md-end{text-align:right!important}.text-md-center{text-align:center!important}}@media (min-width:960px){.float-lg-start{float:left!important}.float-lg-end{float:right!important}.float-lg-none{float:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-grid{display:grid!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:flex!important}.d-lg-inline-flex{display:inline-flex!important}.d-lg-none{display:none!important}.flex-lg-fill{flex:1 1 
auto!important}.flex-lg-row{flex-direction:row!important}.flex-lg-column{flex-direction:column!important}.flex-lg-row-reverse{flex-direction:row-reverse!important}.flex-lg-column-reverse{flex-direction:column-reverse!important}.flex-lg-grow-0{flex-grow:0!important}.flex-lg-grow-1{flex-grow:1!important}.flex-lg-shrink-0{flex-shrink:0!important}.flex-lg-shrink-1{flex-shrink:1!important}.flex-lg-wrap{flex-wrap:wrap!important}.flex-lg-nowrap{flex-wrap:nowrap!important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.justify-content-lg-evenly{justify-content:space-evenly!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!important}.order-lg-first{order:-1!important}.order-lg-0{order:0!important}.order-lg-1{order:1!important}.order-lg-2{order:2!important}.order-lg-3{order:3!important}.order-lg-4{order:4!important}.order-lg-5{order:5!important}.order-lg-last{order:6!important}.m-lg-0{margin:0!important}.m-lg-1{margin:.25rem!important}.m-lg-2{margin:.5rem!important}.m-lg-3{margin:1rem!important}.m-lg-4{margin:1.5rem!important}.m-lg-5{margin:3rem!important}.m-lg-auto{margin:auto!important}.mx-lg-0{margin-left:0!important;margin-right:0!important}.mx-lg-1{margin-left:.25rem!important;margin-right:.25rem!important}.mx-lg-2{margin-left:.5rem!important;margin-right:.5rem!important}.mx-lg-3{margin-left:1rem!important;margin-right:1rem!important}.mx-lg-4{margin-left:1.5rem!important;margin-right:1.5rem!important}.mx-lg-5{margin-left:3rem!important;margin-right:3rem!important}.mx-lg-auto{margin-left:auto!important;margin-right:auto!important}.my-lg-0{margin-bottom:0!important;margin-top:0!important}.my-lg-1{margin-bottom:.25rem!important;margin-top:.25rem!important}.my-lg-2{margin-bottom:.5rem!important;margin-top:.5rem!important}.my-lg-3{margin-bottom:1rem!important;margin-top:1rem!important}.my-lg-4{margin-bottom:1.5rem!important;margin-top:1.5rem!important}.my-lg-5{margin-bottom:3rem!important;margin-top:3rem!important}.my-lg-auto{margin-bottom:auto!important;margin-top:auto!important}.mt-lg-0{margin-top:0!important}.mt-lg-1{margin-top:.25rem!important}.mt-lg-2{margin-top:.5rem!important}.mt-lg-3{margin-top:1rem!important}.mt-lg-4{margin-top:1.5rem!important}.mt-lg-5{margin-top:3rem!important}.mt-lg-auto{margin-top:auto!important}.me-lg-0{margin-right:0!important}.me-lg-1{margin-right:.25rem!important}.me-lg-2{margin-right:.5rem
!important}.me-lg-3{margin-right:1rem!important}.me-lg-4{margin-right:1.5rem!important}.me-lg-5{margin-right:3rem!important}.me-lg-auto{margin-right:auto!important}.mb-lg-0{margin-bottom:0!important}.mb-lg-1{margin-bottom:.25rem!important}.mb-lg-2{margin-bottom:.5rem!important}.mb-lg-3{margin-bottom:1rem!important}.mb-lg-4{margin-bottom:1.5rem!important}.mb-lg-5{margin-bottom:3rem!important}.mb-lg-auto{margin-bottom:auto!important}.ms-lg-0{margin-left:0!important}.ms-lg-1{margin-left:.25rem!important}.ms-lg-2{margin-left:.5rem!important}.ms-lg-3{margin-left:1rem!important}.ms-lg-4{margin-left:1.5rem!important}.ms-lg-5{margin-left:3rem!important}.ms-lg-auto{margin-left:auto!important}.p-lg-0{padding:0!important}.p-lg-1{padding:.25rem!important}.p-lg-2{padding:.5rem!important}.p-lg-3{padding:1rem!important}.p-lg-4{padding:1.5rem!important}.p-lg-5{padding:3rem!important}.px-lg-0{padding-left:0!important;padding-right:0!important}.px-lg-1{padding-left:.25rem!important;padding-right:.25rem!important}.px-lg-2{padding-left:.5rem!important;padding-right:.5rem!important}.px-lg-3{padding-left:1rem!important;padding-right:1rem!important}.px-lg-4{padding-left:1.5rem!important;padding-right:1.5rem!important}.px-lg-5{padding-left:3rem!important;padding-right:3rem!important}.py-lg-0{padding-bottom:0!important;padding-top:0!important}.py-lg-1{padding-bottom:.25rem!important;padding-top:.25rem!important}.py-lg-2{padding-bottom:.5rem!important;padding-top:.5rem!important}.py-lg-3{padding-bottom:1rem!important;padding-top:1rem!important}.py-lg-4{padding-bottom:1.5rem!important;padding-top:1.5rem!important}.py-lg-5{padding-bottom:3rem!important;padding-top:3rem!important}.pt-lg-0{padding-top:0!important}.pt-lg-1{padding-top:.25rem!important}.pt-lg-2{padding-top:.5rem!important}.pt-lg-3{padding-top:1rem!important}.pt-lg-4{padding-top:1.5rem!important}.pt-lg-5{padding-top:3rem!important}.pe-lg-0{padding-right:0!important}.pe-lg-1{padding-right:.25rem!important}.pe-lg-2{padding-right:.5rem!important}.pe-lg-3{padding-right:1rem!important}.pe-lg-4{padding-right:1.5rem!important}.pe-lg-5{padding-right:3rem!important}.pb-lg-0{padding-bottom:0!important}.pb-lg-1{padding-bottom:.25rem!important}.pb-lg-2{padding-bottom:.5rem!important}.pb-lg-3{padding-bottom:1rem!important}.pb-lg-4{padding-bottom:1.5rem!important}.pb-lg-5{padding-bottom:3rem!important}.ps-lg-0{padding-left:0!important}.ps-lg-1{padding-left:.25rem!important}.ps-lg-2{padding-left:.5rem!important}.ps-lg-3{padding-left:1rem!important}.ps-lg-4{padding-left:1.5rem!important}.ps-lg-5{padding-left:3rem!important}.gap-lg-0{gap:0!important}.gap-lg-1{gap:.25rem!important}.gap-lg-2{gap:.5rem!important}.gap-lg-3{gap:1rem!important}.gap-lg-4{gap:1.5rem!important}.gap-lg-5{gap:3rem!important}.text-lg-start{text-align:left!important}.text-lg-end{text-align:right!important}.text-lg-center{text-align:center!important}}@media (min-width:1200px){.float-xl-start{float:left!important}.float-xl-end{float:right!important}.float-xl-none{float:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-grid{display:grid!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:flex!important}.d-xl-inline-flex{display:inline-flex!important}.d-xl-none{display:none!important}.flex-xl-fill{flex:1 1 
auto!important}.flex-xl-row{flex-direction:row!important}.flex-xl-column{flex-direction:column!important}.flex-xl-row-reverse{flex-direction:row-reverse!important}.flex-xl-column-reverse{flex-direction:column-reverse!important}.flex-xl-grow-0{flex-grow:0!important}.flex-xl-grow-1{flex-grow:1!important}.flex-xl-shrink-0{flex-shrink:0!important}.flex-xl-shrink-1{flex-shrink:1!important}.flex-xl-wrap{flex-wrap:wrap!important}.flex-xl-nowrap{flex-wrap:nowrap!important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.justify-content-xl-evenly{justify-content:space-evenly!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!important}.order-xl-first{order:-1!important}.order-xl-0{order:0!important}.order-xl-1{order:1!important}.order-xl-2{order:2!important}.order-xl-3{order:3!important}.order-xl-4{order:4!important}.order-xl-5{order:5!important}.order-xl-last{order:6!important}.m-xl-0{margin:0!important}.m-xl-1{margin:.25rem!important}.m-xl-2{margin:.5rem!important}.m-xl-3{margin:1rem!important}.m-xl-4{margin:1.5rem!important}.m-xl-5{margin:3rem!important}.m-xl-auto{margin:auto!important}.mx-xl-0{margin-left:0!important;margin-right:0!important}.mx-xl-1{margin-left:.25rem!important;margin-right:.25rem!important}.mx-xl-2{margin-left:.5rem!important;margin-right:.5rem!important}.mx-xl-3{margin-left:1rem!important;margin-right:1rem!important}.mx-xl-4{margin-left:1.5rem!important;margin-right:1.5rem!important}.mx-xl-5{margin-left:3rem!important;margin-right:3rem!important}.mx-xl-auto{margin-left:auto!important;margin-right:auto!important}.my-xl-0{margin-bottom:0!important;margin-top:0!important}.my-xl-1{margin-bottom:.25rem!important;margin-top:.25rem!important}.my-xl-2{margin-bottom:.5rem!important;margin-top:.5rem!important}.my-xl-3{margin-bottom:1rem!important;margin-top:1rem!important}.my-xl-4{margin-bottom:1.5rem!important;margin-top:1.5rem!important}.my-xl-5{margin-bottom:3rem!important;margin-top:3rem!important}.my-xl-auto{margin-bottom:auto!important;margin-top:auto!important}.mt-xl-0{margin-top:0!important}.mt-xl-1{margin-top:.25rem!important}.mt-xl-2{margin-top:.5rem!important}.mt-xl-3{margin-top:1rem!important}.mt-xl-4{margin-top:1.5rem!important}.mt-xl-5{margin-top:3rem!important}.mt-xl-auto{margin-top:auto!important}.me-xl-0{margin-right:0!important}.me-xl-1{margin-right:.25rem!important}.me-xl-2{margin-right:.5rem
!important}.me-xl-3{margin-right:1rem!important}.me-xl-4{margin-right:1.5rem!important}.me-xl-5{margin-right:3rem!important}.me-xl-auto{margin-right:auto!important}.mb-xl-0{margin-bottom:0!important}.mb-xl-1{margin-bottom:.25rem!important}.mb-xl-2{margin-bottom:.5rem!important}.mb-xl-3{margin-bottom:1rem!important}.mb-xl-4{margin-bottom:1.5rem!important}.mb-xl-5{margin-bottom:3rem!important}.mb-xl-auto{margin-bottom:auto!important}.ms-xl-0{margin-left:0!important}.ms-xl-1{margin-left:.25rem!important}.ms-xl-2{margin-left:.5rem!important}.ms-xl-3{margin-left:1rem!important}.ms-xl-4{margin-left:1.5rem!important}.ms-xl-5{margin-left:3rem!important}.ms-xl-auto{margin-left:auto!important}.p-xl-0{padding:0!important}.p-xl-1{padding:.25rem!important}.p-xl-2{padding:.5rem!important}.p-xl-3{padding:1rem!important}.p-xl-4{padding:1.5rem!important}.p-xl-5{padding:3rem!important}.px-xl-0{padding-left:0!important;padding-right:0!important}.px-xl-1{padding-left:.25rem!important;padding-right:.25rem!important}.px-xl-2{padding-left:.5rem!important;padding-right:.5rem!important}.px-xl-3{padding-left:1rem!important;padding-right:1rem!important}.px-xl-4{padding-left:1.5rem!important;padding-right:1.5rem!important}.px-xl-5{padding-left:3rem!important;padding-right:3rem!important}.py-xl-0{padding-bottom:0!important;padding-top:0!important}.py-xl-1{padding-bottom:.25rem!important;padding-top:.25rem!important}.py-xl-2{padding-bottom:.5rem!important;padding-top:.5rem!important}.py-xl-3{padding-bottom:1rem!important;padding-top:1rem!important}.py-xl-4{padding-bottom:1.5rem!important;padding-top:1.5rem!important}.py-xl-5{padding-bottom:3rem!important;padding-top:3rem!important}.pt-xl-0{padding-top:0!important}.pt-xl-1{padding-top:.25rem!important}.pt-xl-2{padding-top:.5rem!important}.pt-xl-3{padding-top:1rem!important}.pt-xl-4{padding-top:1.5rem!important}.pt-xl-5{padding-top:3rem!important}.pe-xl-0{padding-right:0!important}.pe-xl-1{padding-right:.25rem!important}.pe-xl-2{padding-right:.5rem!important}.pe-xl-3{padding-right:1rem!important}.pe-xl-4{padding-right:1.5rem!important}.pe-xl-5{padding-right:3rem!important}.pb-xl-0{padding-bottom:0!important}.pb-xl-1{padding-bottom:.25rem!important}.pb-xl-2{padding-bottom:.5rem!important}.pb-xl-3{padding-bottom:1rem!important}.pb-xl-4{padding-bottom:1.5rem!important}.pb-xl-5{padding-bottom:3rem!important}.ps-xl-0{padding-left:0!important}.ps-xl-1{padding-left:.25rem!important}.ps-xl-2{padding-left:.5rem!important}.ps-xl-3{padding-left:1rem!important}.ps-xl-4{padding-left:1.5rem!important}.ps-xl-5{padding-left:3rem!important}.gap-xl-0{gap:0!important}.gap-xl-1{gap:.25rem!important}.gap-xl-2{gap:.5rem!important}.gap-xl-3{gap:1rem!important}.gap-xl-4{gap:1.5rem!important}.gap-xl-5{gap:3rem!important}.text-xl-start{text-align:left!important}.text-xl-end{text-align:right!important}.text-xl-center{text-align:center!important}.fs-1{font-size:2.5rem!important}.fs-2{font-size:2rem!important}.fs-3{font-size:1.75rem!important}.fs-4{font-size:1.5rem!important}}@media print{.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-grid{display:grid!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:flex!important}.d-print-inline-flex{display:inline-flex!important}.d-print-none{display:none!important}} \ No newline at end of file diff --git a/_static/styles/pydata-sphinx-theme.css 
b/_static/styles/pydata-sphinx-theme.css deleted file mode 100644 index 65988ff60..000000000 --- a/_static/styles/pydata-sphinx-theme.css +++ /dev/null @@ -1 +0,0 @@ -html{--pst-header-height:4rem;--pst-header-article-height:calc(var(--pst-header-height)*2/3);--pst-sidebar-secondary:17rem;--pst-font-size-base:1rem;--pst-font-size-h1:2.5rem;--pst-font-size-h2:2rem;--pst-font-size-h3:1.75rem;--pst-font-size-h4:1.5rem;--pst-font-size-h5:1.25rem;--pst-font-size-h6:1.1rem;--pst-font-size-milli:0.9rem;--pst-sidebar-font-size:0.9rem;--pst-sidebar-font-size-mobile:1.1rem;--pst-sidebar-header-font-size:1.2rem;--pst-sidebar-header-font-weight:600;--pst-admonition-font-weight-heading:600;--pst-font-weight-caption:300;--pst-font-weight-heading:400;--pst-font-family-base-system:-apple-system,BlinkMacSystemFont,Segoe UI,"Helvetica Neue",Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol;--pst-font-family-monospace-system:"SFMono-Regular",Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace;--pst-font-family-base:var(--pst-font-family-base-system);--pst-font-family-heading:var(--pst-font-family-base-system);--pst-font-family-monospace:var(--pst-font-family-monospace-system);--pst-font-size-icon:1.5rem;--pst-icon-check-circle:"\f058";--pst-icon-info-circle:"\f05a";--pst-icon-exclamation-triangle:"\f071";--pst-icon-exclamation-circle:"\f06a";--pst-icon-times-circle:"\f057";--pst-icon-lightbulb:"\f0eb";--pst-icon-download:"\f019";--pst-icon-angle-left:"\f104";--pst-icon-angle-right:"\f105";--pst-icon-external-link:"\f35d";--pst-icon-search-minus:"\f010";--pst-icon-github:"\f09b";--pst-icon-gitlab:"\f296";--pst-icon-share:"\f064";--pst-icon-bell:"\f0f3";--pst-icon-pencil:"\f303";--pst-breadcrumb-divider:"\f105";--pst-icon-admonition-default:var(--pst-icon-bell);--pst-icon-admonition-note:var(--pst-icon-info-circle);--pst-icon-admonition-attention:var(--pst-icon-exclamation-circle);--pst-icon-admonition-caution:var(--pst-icon-exclamation-triangle);--pst-icon-admonition-warning:var(--pst-icon-exclamation-triangle);--pst-icon-admonition-danger:var(--pst-icon-exclamation-triangle);--pst-icon-admonition-error:var(--pst-icon-times-circle);--pst-icon-admonition-hint:var(--pst-icon-lightbulb);--pst-icon-admonition-tip:var(--pst-icon-lightbulb);--pst-icon-admonition-important:var(--pst-icon-exclamation-circle);--pst-icon-admonition-seealso:var(--pst-icon-share);--pst-icon-admonition-todo:var(--pst-icon-pencil);--pst-icon-versionmodified-default:var(--pst-icon-exclamation-circle);--pst-icon-versionmodified-added:var(--pst-icon-exclamation-circle);--pst-icon-versionmodified-changed:var(--pst-icon-exclamation-circle);--pst-icon-versionmodified-deprecated:var(--pst-icon-exclamation-circle)}html:not([data-theme]){--pst-color-primary:#459db9;--pst-color-secondary:#ee9040;--pst-color-info:#459db9;--pst-color-warning:#ee9040;--pst-color-success:#28a745;--pst-color-attention:#ffc107;--pst-color-danger:#dc3545;--pst-color-text-base:#323232;--pst-color-text-muted:#646464;--pst-color-shadow:#d8d8d8;--pst-color-border:#c9c9c9;--pst-color-inline-code:#e83e8c;--pst-color-target:#fbe54e;--pst-color-background:#fff;--pst-color-on-background:#fff;--pst-color-surface:#f5f5f5;--pst-color-on-surface:#e1e1e1;--pst-color-link:var(--pst-color-primary);--pst-color-link-hover:var(--pst-color-warning)}html:not([data-theme]) 
.only-dark{display:none!important}html[data-theme=light]{--pst-color-attention:#ffc107;--pst-color-text-base:#323232;--pst-color-text-muted:#646464;--pst-color-shadow:#d8d8d8;--pst-color-border:#c9c9c9;--pst-color-inline-code:#e83e8c;--pst-color-target:#fbe54e;--pst-color-background:#fff;--pst-color-on-background:#fff;--pst-color-surface:#f5f5f5;--pst-color-on-surface:#e1e1e1;--pst-color-link:var(--pst-color-primary);--pst-color-link-hover:var(--pst-color-warning)}html[data-theme=light] .only-dark{display:none!important}html[data-theme=dark]{--pst-color-attention:#dca90f;--pst-color-text-base:#cecece;--pst-color-text-muted:#a6a6a6;--pst-color-shadow:#212121;--pst-color-border:silver;--pst-color-inline-code:#dd9ec2;--pst-color-target:#472700;--pst-color-background:#121212;--pst-color-on-background:#1e1e1e;--pst-color-surface:#212121;--pst-color-on-surface:#373737;--pst-color-link:var(--pst-color-primary);--pst-color-link-hover:var(--pst-color-warning)}html[data-theme=dark] .only-light{display:none!important}html[data-theme=dark] img:not(.only-dark):not(.dark-light){filter:brightness(.8) contrast(1.2)}html[data-theme=dark] .bd-content img:not(.only-dark):not(.dark-light){background:#fff;border-radius:.25rem}html[data-theme=dark] .MathJax_SVG *{fill:var(--pst-color-text-base)}.pst-color-primary{color:var(--pst-color-primary)}.pst-color-secondary{color:var(--pst-color-secondary)}.pst-color-info{color:var(--pst-color-info)}.pst-color-warning{color:var(--pst-color-warning)}.pst-color-success{color:var(--pst-color-success)}.pst-color-attention{color:var(--pst-color-attention)}.pst-color-danger{color:var(--pst-color-danger)}.pst-color-text-base{color:var(--pst-color-text-base)}.pst-color-text-muted{color:var(--pst-color-text-muted)}.pst-color-shadow{color:var(--pst-color-shadow)}.pst-color-border{color:var(--pst-color-border)}.pst-color-inline-code{color:var(--pst-color-inline-code)}.pst-color-target{color:var(--pst-color-target)}.pst-color-background{color:var(--pst-color-background)}.pst-color-on-background{color:var(--pst-color-on-background)}.pst-color-surface{color:var(--pst-color-surface)}.pst-color-on-surface{color:var(--pst-color-on-surface)}html{font-size:var(--pst-font-size-base);scroll-padding-top:calc(var(--pst-header-height) + 1rem)}body{background-color:var(--pst-color-background);color:var(--pst-color-text-base);display:flex;flex-direction:column;font-family:var(--pst-font-family-base);font-weight:400;line-height:1.65;min-height:100vh}body::-webkit-scrollbar{height:.5rem;width:.5rem}body::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted);border-radius:.25rem}body::-webkit-scrollbar-track{background:transparent}body::-webkit-scrollbar-thumb{background:var(--pst-color-on-surface)}body::-webkit-scrollbar-thumb:hover,body:hover::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted)}body::-webkit-scrollbar-track{background:var(--pst-color-background)}p{color:var(--pst-color-text-base);font-size:1em;margin-bottom:1.15rem}p.rubric{border-bottom:1px solid var(--pst-color-border)}p.centered{text-align:center}a{word-wrap:break-word;color:var(--pst-color-link);text-decoration:none}a:hover{color:var(--pst-color-link-hover);text-decoration:underline}a.headerlink{color:var(--pst-color-warning);font-size:.8em;margin-left:.2em;opacity:.4;padding:0 4px;text-decoration:none;transition:all .2s ease-out;user-select:none}a.headerlink:hover{opacity:1}a.github:before,a.gitlab:before{color:var(--pst-color-text-muted);font-family:Font Awesome\ 6 
Brands;margin-right:.25rem}a.github:before{content:var(--pst-icon-github)}a.gitlab:before{content:var(--pst-icon-gitlab)}.heading-style,h1,h2,h3,h4,h5,h6{font-family:var(--pst-font-family-heading);font-weight:var(--pst-font-weight-heading);line-height:1.15;margin:2.75rem 0 1.05rem}h1{font-size:var(--pst-font-size-h1);margin-top:0}h1,h2{color:var(--pst-color-primary)}h2{font-size:var(--pst-font-size-h2)}h3{font-size:var(--pst-font-size-h3)}h3,h4{color:var(--pst-color-text-base)}h4{font-size:var(--pst-font-size-h4)}h5{font-size:var(--pst-font-size-h5)}h5,h6{color:var(--pst-color-text-base)}h6{font-size:var(--pst-font-size-h6)}.text_small,small{font-size:var(--pst-font-size-milli)}hr{border:0;border-top:1px solid var(--pst-color-border)}code,kbd,pre,samp{font-family:var(--pst-font-family-monospace)}kbd{background-color:var(--pst-color-on-background);color:var(--pst-color-text-muted)}kbd:not(.compound){border:1px solid var(--pst-color-border);box-shadow:1px 1px 1px var(--pst-color-shadow);margin:0 .1rem;padding:.1rem .4rem}code{color:var(--pst-color-inline-code)}pre{background-color:var(--pst-color-surface);border:1px solid var(--pst-color-border);border-radius:.25rem;color:var(--pst-color-text-base);line-height:1.2em;margin:1.5em 0;padding:1rem}pre::-webkit-scrollbar{height:.5rem;width:.5rem}pre::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted);border-radius:.25rem}pre::-webkit-scrollbar-track{background:transparent}pre::-webkit-scrollbar-thumb{background:var(--pst-color-on-surface)}pre::-webkit-scrollbar-thumb:hover,pre:hover::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted)}pre .linenos{opacity:.5;padding-right:10px}.skip-link{background-color:var(--pst-color-background);border-bottom:1px solid var(--pst-color-border);color:var(--pst-color-link);left:0;padding:.5rem;position:fixed;right:0;text-align:center;top:0;transform:translateY(-100%);transition:transform .15s ease-in-out;z-index:1055}.skip-link:focus{transform:translateY(0)}.bd-container{display:flex;flex-grow:1;justify-content:center}.bd-container .bd-container__inner{display:flex}.bd-page-width{width:100%}@media (min-width:960px){.bd-page-width{max-width:88rem}}.bd-header-announcement{align-items:center;display:flex;justify-content:center;min-height:3rem;padding:.5rem 12.5%;position:relative;text-align:center;width:100%}@media (max-width:959.98px){.bd-header-announcement{padding:.5rem 2%}}.bd-header-announcement p{font-weight:700;margin:0}.bd-header-announcement:after{background-color:var(--pst-color-info);content:"";height:100%;left:0;opacity:.2;position:absolute;top:0;width:100%;z-index:-1}.bd-header-announcement:empty{display:none}.bd-main{display:flex;flex-direction:column;flex-grow:1;min-width:0}.bd-main .bd-content{display:flex;height:100%;justify-content:center}.bd-main .bd-content .bd-article-container{display:flex;flex-direction:column;justify-content:start;max-width:60em;overflow-x:auto;padding:1rem;width:100%}@media (min-width:1200px){.bd-main .bd-content .bd-article-container .bd-article{padding-left:2rem;padding-top:1.5rem}}.bd-footer{border-top:1px solid var(--pst-color-border);width:100%}.bd-footer .bd-footer__inner{display:flex;flex-grow:1;margin:auto;padding:1rem}.bd-footer .footer-items__end,.bd-footer .footer-items__start{display:flex;flex-direction:column;gap:.5rem;justify-content:center}.bd-footer .footer-items__end{margin-left:auto}.bd-footer .footer-item 
p{margin-bottom:0}.bd-footer-article{display:flex;margin-top:auto}.bd-header{background:var(--pst-color-on-background)!important;box-shadow:0 .125rem .25rem 0 var(--pst-color-shadow);justify-content:center;max-width:100vw;padding:0;position:sticky;top:0;width:100%;z-index:1030}.bd-header .bd-header__inner{align-items:center;display:flex;height:fit-content;padding-left:1rem;padding-right:1rem}.bd-header .navbar-item{align-items:center;display:flex;height:var(--pst-header-height);max-height:var(--pst-header-height)}.bd-header .navbar-header-items{flex-shrink:1}@media (min-width:960px){.bd-header .navbar-header-items{display:flex;flex-grow:1;padding:0 0 0 .5rem}}.bd-header .navbar-header-items__center,.bd-header .navbar-header-items__end,.bd-header .navbar-header-items__start{align-items:center;display:flex;flex-flow:wrap;row-gap:0}.bd-header .navbar-header-items__center,.bd-header .navbar-header-items__end{column-gap:1rem}.bd-header .navbar-header-items__start{flex-shrink:0;gap:.5rem;margin-right:auto}.bd-header .navbar-header-items__end{justify-content:end}.bd-header .navbar-nav{display:flex}@media (min-width:960px){.bd-header .navbar-nav{align-items:center}}.bd-header .navbar-nav li a.nav-link{color:var(--pst-color-text-muted)}.bd-header .navbar-nav li a.nav-link:focus,.bd-header .navbar-nav li a.nav-link:hover{color:var(--pst-color-primary)}.bd-header .navbar-nav>.active>.nav-link{color:var(--pst-color-primary);font-weight:600}.bd-header .navbar-nav .dropdown button{border:none;color:var(--pst-color-text-muted);display:unset}.bd-header .navbar-nav .dropdown .dropdown-menu{background-color:var(--pst-color-on-background);border:1px solid var(--pst-color-border);box-shadow:0 0 .3rem .1rem var(--pst-color-shadow);margin:.5rem 0;min-width:20rem;padding:.5rem 1rem;z-index:1070}.bd-header .navbar-nav .dropdown .dropdown-menu:not(.show){display:none}@media (min-width:960px){.navbar-center-items .navbar-item{display:inline-block}}.toc-entry>.nav-link.active{background-color:transparent;border-left:2px solid var(--pst-color-primary);color:var(--pst-color-primary);font-weight:600}.nav-link:hover{border-style:none}.nav-link.nav-external:after{content:var(--pst-icon-external-link);font-family:Font Awesome\ 6 Free;font-size:.75em;font-weight:900;margin-left:.3em}.bd-navbar-elements li.nav-item i{font-size:.7rem;padding-left:2px;vertical-align:middle}.bd-header label.sidebar-toggle{align-items:center;color:var(--pst-color-muted);cursor:pointer;display:flex;font-size:var(--pst-font-size-icon);margin-bottom:0}.bd-header label.primary-toggle{padding-right:1rem}@media (min-width:960px){.bd-header label.primary-toggle{display:none}}.bd-header label.secondary-toggle{padding-left:1rem}@media (min-width:1200px){.bd-header label.secondary-toggle{display:none}}.bd-header .navbar-header-items{display:none}@media (min-width:960px){.bd-header .navbar-header-items{display:inherit}}.navbar-persistent--mobile{margin-left:auto}@media (min-width:960px){.navbar-persistent--mobile{display:none}}.navbar-persistent--container{display:none}@media (min-width:960px){.navbar-persistent--container{display:flex}}.header-article__inner{display:flex;padding:0 .5rem}.header-article__inner .header-article-item{height:var(--pst-header-article-height);min-height:var(--pst-header-article-height)}.header-article__inner .header-article-items__end,.header-article__inner .header-article-items__start{align-items:start;display:flex;gap:.5rem}.header-article__inner 
.header-article-items__end{margin-left:auto}.bd-sidebar-primary{background-color:var(--pst-color-background);border-right:1px solid var(--pst-color-border);display:flex;flex:0 0 auto;flex-direction:column;font-size:var(--pst-sidebar-font-size-mobile);gap:1rem;max-height:calc(100vh - var(--pst-header-height));overflow-y:auto;padding:2rem 1rem 1rem;position:sticky;top:var(--pst-header-height);width:25%}.bd-sidebar-primary::-webkit-scrollbar{height:.5rem;width:.5rem}.bd-sidebar-primary::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted);border-radius:.25rem}.bd-sidebar-primary::-webkit-scrollbar-track{background:transparent}.bd-sidebar-primary::-webkit-scrollbar-thumb{background:var(--pst-color-on-surface)}.bd-sidebar-primary::-webkit-scrollbar-thumb:hover,.bd-sidebar-primary:hover::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted)}@media (min-width:960px){.bd-sidebar-primary{font-size:var(--pst-sidebar-font-size)}}.bd-sidebar-primary .nav-link{font-size:var(--pst-sidebar-font-size-mobile)}.bd-sidebar-primary.no-sidebar{border-right:0}@media (min-width:960px){.bd-sidebar-primary.hide-on-wide{display:none}}.bd-sidebar-primary h1,.bd-sidebar-primary h2,.bd-sidebar-primary h3,.bd-sidebar-primary h4{color:var(--pst-color-text-base)}.bd-sidebar-primary .sidebar-primary-items__end .sidebar-primary-item,.bd-sidebar-primary .sidebar-primary-items__start .sidebar-primary-item{padding:.5rem 0}.bd-sidebar-primary .sidebar-header-items{display:flex;flex-direction:column}.bd-sidebar-primary .sidebar-header-items .sidebar-header-items__title{color:var(--pst-color-text-base);font-size:var(--pst-sidebar-header-font-size);font-weight:var(--pst-sidebar-header-font-weight);margin-bottom:.5rem}.bd-sidebar-primary .sidebar-header-items .nav-item.dropdown button{display:none}.bd-sidebar-primary .sidebar-header-items .nav-item.dropdown .dropdown-menu{background-color:inherit;border:none;display:flex;flex-direction:column;font-size:inherit;margin:0;padding:0}.bd-sidebar-primary .sidebar-header-items .sidebar-header-items__center{display:flex;flex-direction:column}.bd-sidebar-primary .sidebar-header-items .sidebar-header-items__end{align-items:center;display:flex;gap:.5rem}@media (min-width:960px){.bd-sidebar-primary .sidebar-header-items{display:none}}.bd-sidebar-primary .sidebar-primary-items__start{border-top:1px solid var(--pst-color-border)}@media (min-width:960px){.bd-sidebar-primary .sidebar-primary-items__start{border-top:none}}.bd-sidebar-primary .sidebar-primary-items__end{margin-bottom:1em;margin-top:auto}.bd-sidebar-primary .list-caption{list-style:none;padding-left:0}.bd-sidebar-primary li{position:relative}.bd-sidebar-primary li.has-children>.reference{padding-right:30px}.bd-sidebar-primary label.toctree-toggle{align-items:center;cursor:pointer;display:flex;height:30px;justify-content:center;position:absolute;right:0;top:0;width:30px}.bd-sidebar-primary label.toctree-toggle:hover{background:var(--pst-color-surface)}.bd-sidebar-primary label.toctree-toggle i{display:inline-block;font-size:.75rem;text-align:center}.bd-sidebar-primary label.toctree-toggle i:hover{color:var(--pst-color-primary)}.bd-sidebar-primary .label-parts{height:100%;width:100%}.bd-sidebar-primary .label-parts:hover{background:none}.bd-sidebar-primary .label-parts i{position:absolute;right:0;top:.3em;width:30px}nav.bd-links{margin-right:-1rem}@media (min-width:960px){nav.bd-links{display:block}}nav.bd-links ul{list-style:none}nav.bd-links ul ul{padding:0 0 0 1rem}nav.bd-links 
li>a{color:var(--pst-color-text-muted);display:block;padding:.25rem 0}nav.bd-links li>a:hover{background-color:transparent;color:var(--pst-color-primary);text-decoration:none}nav.bd-links li>a.reference.external:after{content:var(--pst-icon-external-link);font-family:Font Awesome\ 6 Free;font-size:.75em;font-weight:900;margin-left:.3em}nav.bd-links .active:hover>a,nav.bd-links .active>a{color:var(--pst-color-primary);font-weight:600}nav.bd-links p.bd-links__title{font-size:var(--pst-sidebar-header-font-size)}nav.bd-links p.bd-links__title,nav.bd-links p.caption{font-weight:var(--pst-sidebar-header-font-weight);margin-bottom:.5rem}nav.bd-links p.caption{color:var(--pst-color-text-base);font-size:var(--pst-sidebar-font-size-mobile);margin-top:1.25rem;position:relative}nav.bd-links p.caption:first-child{margin-top:0}@media (min-width:960px){nav.bd-links p.caption{font-size:var(--pst-sidebar-font-size)}}.bd-sidebar-secondary{background-color:var(--pst-color-background);display:flex;flex-direction:column;flex-shrink:0;font-size:var(--pst-sidebar-font-size-mobile);max-height:calc(100vh - var(--pst-header-height));order:2;overflow-y:auto;padding:2rem 1rem 1rem;position:sticky;top:var(--pst-header-height);width:var(--pst-sidebar-secondary)}@media (min-width:1200px){.bd-sidebar-secondary{font-size:var(--pst-sidebar-font-size)}}.bd-sidebar-secondary::-webkit-scrollbar{height:.5rem;width:.5rem}.bd-sidebar-secondary::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted);border-radius:.25rem}.bd-sidebar-secondary::-webkit-scrollbar-track{background:transparent}.bd-sidebar-secondary::-webkit-scrollbar-thumb{background:var(--pst-color-on-surface)}.bd-sidebar-secondary::-webkit-scrollbar-thumb:hover,.bd-sidebar-secondary:hover::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted)}.sidebar-secondary-item{padding:.5rem}@media (min-width:1200px){.sidebar-secondary-item{border-left:1px solid var(--pst-color-border);padding-left:1rem}}.sidebar-secondary-item i{padding-right:.5rem}input.sidebar-toggle{display:none}label.overlay{background-color:#000;height:0;left:0;opacity:.5;position:fixed;top:0;transition:opacity .2s ease-out;width:0;z-index:1050}input#__primary:checked+label.overlay.overlay-primary,input#__secondary:checked+label.overlay.overlay-secondary{height:100vh;width:100vw}input#__primary:checked~.bd-container .bd-sidebar-primary{margin-left:0;visibility:visible}input#__secondary:checked~.bd-container .bd-sidebar-secondary{margin-right:0;visibility:visible}@media (min-width:960px){label.sidebar-toggle.primary-toggle{display:none}input#__primary:checked+label.overlay.overlay-primary{height:0;width:0}.bd-sidebar-primary{margin-left:0;visibility:visible}}@media (max-width:959.98px){.bd-sidebar-primary{flex-grow:0.75;height:100vh;left:0;margin-left:-75%;max-height:100vh;max-width:350px;position:fixed;top:0;transition:visibility .2s ease-out,margin .2s ease-out;visibility:hidden;width:75%;z-index:1055}}@media (max-width:1199.98px){.bd-sidebar-secondary{flex-grow:0.75;height:100vh;margin-right:-75%;max-height:100vh;max-width:350px;position:fixed;right:0;top:0;transition:visibility .2s ease-out,margin .2s ease-out;visibility:hidden;width:75%;z-index:1055}}ul.bd-breadcrumbs{display:flex;flex-wrap:wrap;font-size:.8rem;list-style:none;padding-left:0}ul.bd-breadcrumbs li.breadcrumb-item{align-items:center;display:flex;font-weight:700}ul.bd-breadcrumbs li.breadcrumb-item a{color:var(--pst-color-link)}ul.bd-breadcrumbs 
li.breadcrumb-item:not(.breadcrumb-home):before{color:var(--pst-color-text-muted);content:var(--pst-breadcrumb-divider);font-family:Font Awesome\ 6 Free;font-size:.8rem;font-weight:900;padding:0 .5rem}.navbar-icon-links{column-gap:1rem;display:flex;flex-direction:row;flex-wrap:wrap}.navbar-icon-links li.nav-item a.nav-link{padding-left:0;padding-right:0}.navbar-icon-links a span{align-items:center;display:flex}.navbar-icon-links i.fa-brands,.navbar-icon-links i.fa-regular,.navbar-icon-links i.fa-solid{font-size:var(--pst-font-size-icon);font-style:normal;vertical-align:middle}.navbar-icon-links i.fa-square-twitter:before{color:#55acee}.navbar-icon-links i.fa-square-gitlab:before{color:#548}.navbar-icon-links i.fa-bitbucket:before{color:#0052cc}.navbar-icon-links img.icon-link-image{border-radius:.2rem;height:1.5em}.navbar-brand{align-items:center;display:flex;flex-shrink:0;gap:.5rem;height:var(--pst-header-height);margin:0;max-height:var(--pst-header-height);padding:.5rem 0;position:relative;width:auto}.navbar-brand p{margin-bottom:0}.navbar-brand img{height:100%;max-width:100%;width:auto}.navbar-nav ul{display:block;list-style:none}.navbar-nav ul ul{padding:0 0 0 1rem}.navbar-nav li{display:flex;flex-direction:column}.navbar-nav li a{align-items:center;color:var(--pst-color-text-muted);display:flex;height:100%;padding-bottom:.25rem;padding-top:.25rem}.navbar-nav li a:hover{border-style:none;text-decoration-line:none}.navbar-nav li a:focus,.navbar-nav li a:hover,.navbar-nav li.current>a{color:var(--pst-color-primary)}.navbar-nav li.current>a{font-weight:600}.navbar-nav .toctree-checkbox{display:none;position:absolute}.navbar-nav .toctree-checkbox~ul{display:none}.navbar-nav .toctree-checkbox~label i{transform:rotate(0deg)}.navbar-nav .toctree-checkbox:checked~ul{display:block}.navbar-nav .toctree-checkbox:checked~label i{transform:rotate(180deg)}.bd-header .navbar-nav>p.sidebar-header-items__title{display:none}.page-toc .section-nav{border-bottom:none;padding-left:0}.page-toc .section-nav ul{padding-left:1rem}.page-toc .nav-link{font-size:var(--pst-sidebar-font-size-mobile)}@media (min-width:1200px){.page-toc .nav-link{font-size:var(--pst-sidebar-font-size)}}.page-toc .onthispage{color:var(--pst-color-text-base);font-weight:var(--pst-sidebar-header-font-weight);margin-bottom:.5rem}.prev-next-area{width:100%}.prev-next-area p{line-height:1.3em;margin:0 .3em}.prev-next-area i{font-size:1.2em}.prev-next-area a{align-items:center;border:none;color:var(--pst-color-text-muted);display:flex;max-width:45%;overflow-x:hidden;padding:10px;text-decoration:none}.prev-next-area a p.prev-next-title{color:var(--pst-color-primary);font-size:1.1em;font-weight:var(--pst-admonition-font-weight-heading)}.prev-next-area a:hover p.prev-next-title{text-decoration:underline}.prev-next-area a .prev-next-info{flex-direction:column;margin:0 .5em}.prev-next-area a .prev-next-info .prev-next-subtitle{text-transform:capitalize}.prev-next-area a.left-prev{float:left}.prev-next-area a.right-next{float:right}.prev-next-area a.right-next div.prev-next-info{text-align:right}.bd-search{margin-left:-15px;margin-right:-15px;padding:1rem 15px;position:relative}.bd-search .icon{color:var(--pst-color-border);left:25px;position:absolute}.bd-search i.fa-solid.fa-magnifying-glass{color:var(--pst-color-text-muted);left:1.6rem;position:absolute}.bd-search input{background-color:var(--pst-color-background);border:1px solid var(--pst-color-border);border-radius:.25rem;color:var(--pst-color-text-base);padding-left:2.5rem}.bd-search 
input::placeholder{color:var(--pst-color-border)}.bd-search input:active,.bd-search input:focus{background-color:var(--pst-color-background);color:var(--pst-color-text-base)}.bd-search input::-webkit-search-cancel-button,.bd-search input::-webkit-search-decoration{-webkit-appearance:none;appearance:none}.bd-search .search-button__kbd-shortcut{color:var(--pst-color-border);display:flex;position:absolute;right:2em}.search-button{align-content:center;align-items:center;color:var(--pst-color-text-muted);display:flex;padding:0}.search-button:hover{color:var(--pst-color-primary)}.search-button i{font-size:1.3rem}.search-button__overlay,.search-button__search-container{display:none}.search-button__wrapper.show .search-button__search-container{display:flex;left:50%;margin-top:.5rem;max-width:800px;position:fixed;right:1rem;top:30%;transform:translate(-50%,-50%);width:90%;z-index:1055}.search-button__wrapper.show .search-button__overlay{background-color:#000;display:flex;height:100%;left:0;opacity:.5;position:fixed;top:0;width:100%;z-index:1050}.search-button__wrapper.show form.bd-search{flex-grow:1;padding-bottom:0;padding-top:0}.search-button__wrapper.show i,.search-button__wrapper.show input{font-size:var(--pst-font-size-icon)}div#searchbox p.highlight-link{box-shadow:0 .2rem .5rem var(--pst-color-shadow),0 0 .0625rem var(--pst-color-shadow)!important;margin:1rem 0;width:fit-content}@media (min-width:1200px){div#searchbox p.highlight-link{margin-left:2rem}}div#searchbox p.highlight-link a{background-color:var(--pst-color-primary);border-radius:.25rem;color:#fff;font-size:1.25rem;padding:.75rem;transition:box-shadow .25s ease-out}div#searchbox p.highlight-link a:hover{box-shadow:inset 0 0 50px 50px rgba(0,0,0,.25);text-decoration:none}div#searchbox p.highlight-link a:before{color:unset;content:var(--pst-icon-search-minus);font-family:Font Awesome\ 6 Free;font-weight:900;margin-right:.5rem}.theme-switch-button{border-color:var(--pst-color-on-background);font-size:calc(var(--pst-font-size-icon) - .1rem);margin:0 -.5rem;padding:0}.theme-switch-button span{color:var(--pst-color-text-muted);display:none;padding:.5rem}.theme-switch-button span:active,.theme-switch-button span:focus,.theme-switch-button span:hover{text-decoration:none}.theme-switch-button:active,.theme-switch-button:hover{background-color:var(--pst-color-on-surface)!important;border-color:var(--pst-color-on-background)!important}.theme-switch-button:active a,.theme-switch-button:hover a{color:var(--pst-color-text-muted)}.bd-sidebar-primary .theme-switch-button{border-color:var(--pst-color-background)}.bd-sidebar-primary .theme-switch-button:active,.bd-sidebar-primary .theme-switch-button:hover{border-color:var(--pst-color-background)!important}html[data-mode=auto] .theme-switch-button span[data-mode=auto],html[data-mode=dark] .theme-switch-button span[data-mode=dark],html[data-mode=light] .theme-switch-button span[data-mode=light]{display:flex}button.btn.version-switcher__button{border-color:var(--pst-color-border);color:var(--pst-color-text-base);margin-bottom:1em}@media (min-width:960px){button.btn.version-switcher__button{margin-bottom:unset}}button.btn.version-switcher__button:active{border-color:var(--pst-color-border);color:var(--pst-color-text-base)}.version-switcher__menu{border-color:var(--pst-color-border);border-radius:var(--bs-dropdown-border-radius)}.version-switcher__menu a.list-group-item{background-color:var(--pst-color-on-background);color:var(--pst-color-text-base);padding:.75rem 1.25rem}.version-switcher__menu 
a.list-group-item:not(:last-child){border-bottom:1px solid var(--pst-color-border)}.version-switcher__menu a.list-group-item:hover{background-color:var(--pst-color-surface)}.version-switcher__menu a.list-group-item.active{color:var(--pst-color-primary)}.version-switcher__menu a.list-group-item.active span:before{background-color:var(--pst-color-primary);content:"";height:100%;left:0;opacity:.1;position:absolute;top:0;width:100%}.version-switcher__menu,button.version-switcher__button{font-size:1.1em}@media (min-width:960px){.version-switcher__menu,button.version-switcher__button{font-size:unset}}nav.page-toc{margin-bottom:1rem}.bd-toc .nav .nav,.list-caption .nav{display:none}.bd-toc .nav .nav.visible,.bd-toc .nav>.active>ul,.list-caption .nav.visible,.list-caption>.active>ul,.toc-entry{display:block}.toc-entry a.nav-link{color:var(--pst-color-text-muted);display:block;margin-left:-1rem;padding:.125rem 0 .125rem 1rem}.toc-entry a.nav-link:hover{color:var(--pst-color-primary);text-decoration:none}.toc-entry a.nav-link.active{background-color:transparent;border-left:2px solid var(--pst-color-primary);color:var(--pst-color-primary);font-weight:600}div.deprecated,div.versionadded,div.versionchanged{background-color:var(--pst-color-on-background);border-left:.2rem solid;border-color:var(--pst-color-info);border-radius:.25rem;box-shadow:0 .2rem .5rem var(--pst-color-shadow),0 0 .0625rem var(--pst-color-shadow)!important;margin:1.5625em auto;overflow:hidden;padding:0 .6rem;page-break-inside:avoid;position:relative;transition:color .25s,background-color .25s,border-color .25s;vertical-align:middle}div.deprecated>p,div.versionadded>p,div.versionchanged>p{margin-bottom:.6rem;margin-top:.6rem}div.deprecated>p:before,div.versionadded>p:before,div.versionchanged>p:before{background-color:var(--pst-color-info);content:"";height:100%;left:0;opacity:.1;pointer-events:none;position:absolute;top:0;width:100%}div.versionadded{border-color:var(--pst-color-success)}div.versionadded p:before{background-color:var(--pst-color-success)}div.versionchanged{border-color:var(--pst-color-warning)}div.versionchanged p:before{background-color:var(--pst-color-warning)}div.deprecated{border-color:var(--pst-color-danger)}div.deprecated p:before{background-color:var(--pst-color-danger)}span.versionmodified{font-weight:600}span.versionmodified:before{color:var(--pst-color-info);content:var(--pst-icon-versionmodified-default);font-family:Font Awesome\ 6 Free;font-style:normal;font-weight:900;margin-right:.6rem}span.versionmodified.added:before{color:var(--pst-color-success);content:var(--pst-icon-versionmodified-added)}span.versionmodified.changed:before{color:var(--pst-color-warning);content:var(--pst-icon-versionmodified-changed)}span.versionmodified.deprecated:before{color:var(--pst-color-danger);content:var(--pst-icon-versionmodified-deprecated)}.sidebar-indices-items{border-top:1px solid var(--pst-color-border);display:flex;flex-direction:column}@media (min-width:960px){.sidebar-indices-items{border-top:none}}.sidebar-indices-items .sidebar-indices-items__title{color:var(--pst-color-text-base);font-size:var(--pst-sidebar-header-font-size);font-weight:var(--pst-sidebar-header-font-weight);margin-bottom:.5rem}.sidebar-indices-items ul.indices-link{list-style:none;margin-right:-1rem;padding:0}.sidebar-indices-items ul.indices-link li>a{color:var(--pst-color-text-muted);display:block;padding:.25rem 0}.sidebar-indices-items ul.indices-link 
li>a:hover{background-color:transparent;color:var(--pst-color-primary);text-decoration:none}.bd-sidebar-primary div#rtd-footer-container{bottom:-1rem;margin:-1rem;position:sticky}.bd-sidebar-primary div#rtd-footer-container .rst-versions.rst-badge{font-family:var(--pst-font-family-base);font-size:.9em;max-width:unset;position:unset}.bd-sidebar-primary div#rtd-footer-container .rst-versions.rst-badge .rst-current-version{align-items:center;background-color:var(--pst-color-background);border-top:1px solid var(--pst-color-border);color:var(--pst-color-success);display:flex;gap:.2rem;height:2.5rem;transition:background-color .2s ease-out}.bd-sidebar-primary div#rtd-footer-container .rst-versions.rst-badge .fa.fa-book{color:var(--pst-color-text-muted);margin-right:auto}.bd-sidebar-primary div#rtd-footer-container .rst-versions.rst-badge .fa.fa-book:after{color:var(--pst-color-text-base);content:"Read The Docs";font-family:var(--pst-font-family-base);font-weight:var(--pst-admonition-font-weight-heading)}.bd-sidebar-primary div#rtd-footer-container .rst-versions.rst-badge .fa.fa-caret-down{color:var(--pst-color-text-muted)}.bd-sidebar-primary div#rtd-footer-container .rst-versions.rst-badge.shift-up .rst-current-version{border-bottom:1px solid var(--pst-color-border)}.bd-sidebar-primary div#rtd-footer-container .rst-other-versions{background-color:var(--pst-color-surface);color:var(--pst-color-text-base)}.bd-sidebar-primary div#rtd-footer-container .rst-other-versions dl dd a{color:var(--pst-color-text-muted)}.bd-sidebar-primary div#rtd-footer-container .rst-other-versions hr{background-color:var(--pst-color-border)}.bd-sidebar-primary div#rtd-footer-container .rst-other-versions small a{color:var(--pst-color-link)}.bd-sidebar-primary div#rtd-footer-container .rst-other-versions input{background-color:var(--pst-color-surface);border:1px solid var(--pst-color-border);padding-left:.5rem}.admonition,div.admonition{background-color:var(--pst-color-on-background);border-left:.2rem solid;border-color:var(--pst-color-info);border-radius:.25rem;box-shadow:0 .2rem .5rem var(--pst-color-shadow),0 0 .0625rem var(--pst-color-shadow)!important;margin:1.5625em auto;overflow:hidden;padding:0 .6rem .8rem;page-break-inside:avoid}.admonition :last-child,div.admonition :last-child{margin-bottom:0}.admonition p.admonition-title~*,div.admonition p.admonition-title~*{margin-left:1.4rem;margin-right:1.4rem}.admonition>ol,.admonition>ul,div.admonition>ol,div.admonition>ul{margin-left:1em}.admonition>.admonition-title,div.admonition>.admonition-title{font-weight:var(--pst-admonition-font-weight-heading);margin:0 -.6rem;padding:.4rem .6rem .4rem 2rem;position:relative}.admonition>.admonition-title:after,div.admonition>.admonition-title:after{color:var(--pst-color-info);content:var(--pst-icon-admonition-default);font-family:Font Awesome\ 6 
Free;font-weight:900;height:1rem;left:.5rem;opacity:1;position:absolute;width:1rem}.admonition>.admonition-title:before,div.admonition>.admonition-title:before{background-color:var(--pst-color-info);content:"";height:100%;left:0;opacity:.1;pointer-events:none;position:absolute;top:0;width:100%}.admonition>.admonition-title+*,div.admonition>.admonition-title+*{margin-top:.4em}.admonition.attention,div.admonition.attention{border-color:var(--pst-color-attention)}.admonition.attention>.admonition-title:before,div.admonition.attention>.admonition-title:before{background-color:var(--pst-color-attention)}.admonition.attention>.admonition-title:after,div.admonition.attention>.admonition-title:after{color:var(--pst-color-attention);content:var(--pst-icon-admonition-attention)}.admonition.caution,div.admonition.caution{border-color:var(--pst-color-warning)}.admonition.caution>.admonition-title:before,div.admonition.caution>.admonition-title:before{background-color:var(--pst-color-warning)}.admonition.caution>.admonition-title:after,div.admonition.caution>.admonition-title:after{color:var(--pst-color-warning);content:var(--pst-icon-admonition-caution)}.admonition.warning,div.admonition.warning{border-color:var(--pst-color-warning)}.admonition.warning>.admonition-title:before,div.admonition.warning>.admonition-title:before{background-color:var(--pst-color-warning)}.admonition.warning>.admonition-title:after,div.admonition.warning>.admonition-title:after{color:var(--pst-color-warning);content:var(--pst-icon-admonition-warning)}.admonition.danger,div.admonition.danger{border-color:var(--pst-color-danger)}.admonition.danger>.admonition-title:before,div.admonition.danger>.admonition-title:before{background-color:var(--pst-color-danger)}.admonition.danger>.admonition-title:after,div.admonition.danger>.admonition-title:after{color:var(--pst-color-danger);content:var(--pst-icon-admonition-danger)}.admonition.error,div.admonition.error{border-color:var(--pst-color-danger)}.admonition.error>.admonition-title:before,div.admonition.error>.admonition-title:before{background-color:var(--pst-color-danger)}.admonition.error>.admonition-title:after,div.admonition.error>.admonition-title:after{color:var(--pst-color-danger);content:var(--pst-icon-admonition-error)}.admonition.hint,div.admonition.hint{border-color:var(--pst-color-success)}.admonition.hint>.admonition-title:before,div.admonition.hint>.admonition-title:before{background-color:var(--pst-color-success)}.admonition.hint>.admonition-title:after,div.admonition.hint>.admonition-title:after{color:var(--pst-color-success);content:var(--pst-icon-admonition-hint)}.admonition.tip,div.admonition.tip{border-color:var(--pst-color-success)}.admonition.tip>.admonition-title:before,div.admonition.tip>.admonition-title:before{background-color:var(--pst-color-success)}.admonition.tip>.admonition-title:after,div.admonition.tip>.admonition-title:after{color:var(--pst-color-success);content:var(--pst-icon-admonition-tip)}.admonition.important,div.admonition.important{border-color:var(--pst-color-attention)}.admonition.important>.admonition-title:before,div.admonition.important>.admonition-title:before{background-color:var(--pst-color-attention)}.admonition.important>.admonition-title:after,div.admonition.important>.admonition-title:after{color:var(--pst-color-attention);content:var(--pst-icon-admonition-important)}
.admonition.note,div.admonition.note{border-color:var(--pst-color-info)}.admonition.note>.admonition-title:before,div.admonition.note>.admonition-title:before{background-color:var(--pst-color-info)}.admonition.note>.admonition-title:after,div.admonition.note>.admonition-title:after{color:var(--pst-color-info);content:var(--pst-icon-admonition-note)}.admonition.seealso,div.admonition.seealso{border-color:var(--pst-color-success)}.admonition.seealso>.admonition-title:before,div.admonition.seealso>.admonition-title:before{background-color:var(--pst-color-success)}.admonition.seealso>.admonition-title:after,div.admonition.seealso>.admonition-title:after{color:var(--pst-color-success);content:var(--pst-icon-admonition-seealso)}.admonition.admonition-todo,div.admonition.admonition-todo{border-color:var(--pst-color-border)}.admonition.admonition-todo>.admonition-title:before,div.admonition.admonition-todo>.admonition-title:before{background-color:var(--pst-color-border)}.admonition.admonition-todo>.admonition-title:after,div.admonition.admonition-todo>.admonition-title:after{color:var(--pst-color-border);content:var(--pst-icon-admonition-todo)}.admonition.sidebar,div.admonition.sidebar{border-width:0 0 0 .2rem;clear:both;float:right;margin-left:.5rem;margin-top:0;max-width:40%}.admonition.sidebar.attention,.admonition.sidebar.important,div.admonition.sidebar.attention,div.admonition.sidebar.important{border-color:var(--pst-color-attention)}.admonition.sidebar.caution,.admonition.sidebar.warning,div.admonition.sidebar.caution,div.admonition.sidebar.warning{border-color:var(--pst-color-warning)}.admonition.sidebar.danger,.admonition.sidebar.error,div.admonition.sidebar.danger,div.admonition.sidebar.error{border-color:var(--pst-color-danger)}.admonition.sidebar.hint,.admonition.sidebar.seealso,.admonition.sidebar.tip,div.admonition.sidebar.hint,div.admonition.sidebar.seealso,div.admonition.sidebar.tip{border-color:var(--pst-color-success)}.admonition.sidebar.note,.admonition.sidebar.todo,div.admonition.sidebar.note,div.admonition.sidebar.todo{border-color:var(--pst-color-info)}.admonition.sidebar p.admonition-title~*,div.admonition.sidebar p.admonition-title~*{margin-left:0;margin-right:0}aside.topic,div.topic,div.topic.contents,nav.contents{background-color:var(--pst-color-surface);border-color:var(--pst-color-border);border-radius:.25rem;box-shadow:0 .2rem .5rem var(--pst-color-shadow),0 0 .0625rem var(--pst-color-shadow)!important;display:flex;flex-direction:column;padding:1rem 1.25rem}aside.topic .topic-title,div.topic .topic-title,div.topic.contents .topic-title,nav.contents .topic-title{margin:0 0 .5rem}aside.topic ul.simple,div.topic ul.simple,div.topic.contents ul.simple,nav.contents ul.simple{padding-left:1rem}aside.topic ul.simple ul,div.topic ul.simple ul,div.topic.contents ul.simple ul,nav.contents ul.simple ul{padding-left:2em}aside.sidebar{background-color:var(--pst-color-surface);border:1px solid var(--pst-color-border);border-radius:.25rem;margin-left:.5rem;padding:0}aside.sidebar>:last-child{padding-bottom:1rem}aside.sidebar p.sidebar-title{border-bottom:1px solid var(--pst-color-border);font-family:var(--pst-font-family-heading);font-weight:var(--pst-admonition-font-weight-heading);margin-bottom:0;padding-bottom:.5rem;padding-top:.5rem;position:relative}aside.sidebar>:not(.sidebar-title):first-child,aside.sidebar>p.sidebar-title+*{margin-top:1rem}aside.sidebar>*{padding-left:1rem;padding-right:1rem}p.rubric{display:flex;flex-direction:column}.seealso dd{margin-bottom:0;margin-top:0}table.field-list{border-collapse:separate;border-spacing:10px;margin-left:1px}table.field-list th.field-name{background-color:var(--pst-color-surface);padding:1px 
8px 1px 5px;white-space:nowrap}table.field-list td.field-body p{font-style:italic}table.field-list td.field-body p>strong{font-style:normal}table.field-list td.field-body blockquote{border-left:none;margin:0 0 .3em;padding-left:30px}.table.autosummary td:first-child{white-space:nowrap}.sig{font-family:var(--pst-font-family-monospace)}.sig-inline.c-texpr,.sig-inline.cpp-texpr{font-family:unset}.sig.c .k,.sig.c .kt,.sig.c .m,.sig.c .s,.sig.c .sc,.sig.cpp .k,.sig.cpp .kt,.sig.cpp .m,.sig.cpp .s,.sig.cpp .sc{color:var(--pst-color-text-base)}.sig-name{color:var(--pst-color-inline-code)}dt:target,span.highlighted{background-color:var(--pst-color-target)}.viewcode-back{font-family:var(--pst-font-family-base)}.viewcode-block:target{background-color:var(--pst-color-target);border-bottom:1px solid var(--pst-color-border);border-top:1px solid var(--pst-color-border);position:relative}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd{margin-left:2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>dl.simple>dt{display:flex}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl.field-list{display:grid;grid-template-columns:unset}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dt.field-even,dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dt.field-odd{background-color:var(--pst-color-surface);margin-bottom:.1rem;margin-top:.2rem}div.highlight,div.literal-block-wrapper,div[class*=highlight-]{border-radius:.25rem;display:flex;flex-direction:column;width:unset}div.literal-block-wrapper{border:1px solid var(--pst-color-border);border-radius:.25rem}div.literal-block-wrapper div.code-block-caption{border-bottom:1px solid var(--pst-color-border);font-size:1rem;font-weight:var(--pst-font-weight-caption);margin:0;padding:.5rem}div.literal-block-wrapper div.code-block-caption a.headerlink{font-size:inherit}div.literal-block-wrapper div[class*=highlight-]{border-radius:0;margin:0}div.literal-block-wrapper div[class*=highlight-] pre{border:none;box-shadow:none}code.literal{background-color:var(--pst-color-surface);border:1px solid var(--pst-color-on-surface);border-radius:.25rem;padding:.1rem .25rem}figure a.headerlink{font-size:inherit;position:absolute}figure:hover a.headerlink{visibility:visible}figure figcaption{color:var(--pst-color-text-muted);font-family:var(--pst-font-family-heading);font-weight:var(--pst-font-weight-caption);margin-left:auto;margin-right:auto}figure figcaption table.table{margin-left:auto;margin-right:auto;width:fit-content}dt.label>span.brackets:not(:only-child):before{content:"["}dt.label>span.brackets:not(:only-child):after{content:"]"}a.footnote-reference{font-size:small;vertical-align:super}aside.footnote{margin-bottom:.5rem}aside.footnote:last-child{margin-bottom:1rem}aside.footnote span.backrefs,aside.footnote span.label{font-weight:700}aside.footnote:target{background-color:var(--pst-color-target)}div.doctest>div.highlight span.gp,span.linenos,table.highlighttable td.linenos{user-select:none;-webkit-user-select:text;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none}dd{margin-bottom:10px;margin-left:30px;margin-top:3px}ol,ul{padding-inline-start:2rem}ol li>p:first-child,ul li>p:first-child{margin-bottom:.25rem;margin-top:.25rem}blockquote{border-left:.25em solid var(--pst-color-border);border-radius:.25rem;padding:1em;position:relative}blockquote,blockquote 
p{color:var(--pst-color-text-muted)}blockquote .line-block{margin:0}blockquote p:last-child{margin-bottom:0}blockquote:before{background-color:var(--pst-color-border);content:"";height:100%;left:0;opacity:.1;pointer-events:none;position:absolute;top:0;width:100%;z-index:-1}span.guilabel{border:1px solid var(--pst-color-info);border-radius:4px;color:var(--pst-color-info);font-size:80%;font-weight:700;margin:auto 2px;padding:2.4px 6px;position:relative}span.guilabel:before{background-color:var(--pst-color-info);content:"";height:100%;left:0;opacity:.1;pointer-events:none;position:absolute;top:0;width:100%}a.reference.download:before{color:var(--pst-color-text-muted);content:var(--pst-icon-download);font-family:Font Awesome\ 6 Free;font-size:.8em;font-weight:600;padding:0 .25em}table{display:table;margin-left:auto;margin-right:auto;max-width:100%;overflow:auto;width:fit-content}table::-webkit-scrollbar{height:.5rem;width:.5rem}table::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted);border-radius:.25rem}table::-webkit-scrollbar-track{background:transparent}table::-webkit-scrollbar-thumb{background:var(--pst-color-on-surface)}table::-webkit-scrollbar-thumb:hover,table:hover::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted)}table.table-right{margin-right:0}table.table-left{margin-left:0}table caption{caption-side:top;color:var(--pst-color-text-muted);text-align:center}td.text-align\:left,th.text-align\:left{text-align:left}td.text-align\:right,th.text-align\:right{text-align:right}td.text-align\:center,th.text-align\:center{text-align:center}.toctree-wrapper p.caption{font-size:1.5em;margin-bottom:0}.toctree-wrapper>ul{padding-left:0}.toctree-wrapper li[class^=toctree-l]{list-style:none;margin-bottom:.2em}.toctree-wrapper li[class^=toctree-l]>a{font-size:1.1em;list-style:none}.toctree-wrapper li[class^=toctree-l]>ul{list-style:none;padding-inline-start:1.5em}.toctree-wrapper .toctree-l1>a{font-size:1.3em}div.topic.contents ul.simple,nav.contents ul.simple{list-style:none;padding-left:0}div.math,span.math{align-items:center;display:flex;max-width:100%;overflow:hidden}span.math{display:inline-flex}div.math{flex-direction:row-reverse;gap:.5em}div.math span.eqno a.headerlink{font-size:1em;position:relative}div.math mjx-container{flex-grow:1;overflow:auto;padding-bottom:.2rem}div.math mjx-container::-webkit-scrollbar{height:.5rem;width:.5rem}div.math mjx-container::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted);border-radius:.25rem}div.math mjx-container::-webkit-scrollbar-track{background:transparent}div.math mjx-container::-webkit-scrollbar-thumb{background:var(--pst-color-on-surface)}div.math mjx-container::-webkit-scrollbar-thumb:hover,div.math mjx-container:hover::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted)}div.math mjx-container mjx-assistive-mml{height:0}.ablog-sidebar-item h2,.ablog-sidebar-item h3{font-size:var(--pst-sidebar-header-font-size);margin-top:.5rem}.ablog-sidebar-item h2 a,.ablog-sidebar-item h3 a{color:var(--pst-color-text-base)}.ablog-sidebar-item ul{display:flex;flex-direction:column;gap:.5em;list-style:none;margin-bottom:0;overflow-y:hidden;padding-left:0}.ablog-sidebar-item ul.ablog-cloud{flex-direction:row;flex-flow:wrap;gap:.5rem}.ablog-sidebar-item ul.ablog-cloud li{align-items:center;display:flex}.ablog__prev-next{display:flex;font-size:1.2em;padding:1rem 0}.ablog__prev-next>span{display:flex;max-width:45%}.ablog__prev-next>span 
a{align-items:center;display:flex;gap:1rem;line-height:1.5rem;margin-left:auto}.ablog__prev-next>span a i:before{color:var(--pst-color-text-base)}.ablog__prev-next span.ablog__prev i.fa-arrow-circle-left:before{content:var(--pst-icon-angle-left)}.ablog__prev-next span.ablog__spacer{display:none}.ablog__prev-next span.ablog__next{margin-left:auto;text-align:right}.ablog__prev-next span.ablog__next i.fa-arrow-circle-right:before{content:var(--pst-icon-angle-right)}.ablog__collection,.postlist{padding-left:0}.ablog__collection .ablog-post,.postlist .ablog-post{list-style:none}.ablog__collection .ablog-post .ablog-archive,.postlist .ablog-post .ablog-archive{display:flex;flex-direction:row;flex-wrap:wrap;font-size:.75rem;gap:1rem;list-style:none;padding-left:0}.ablog__collection .ablog-post .ablog-post-title,.postlist .ablog-post .ablog-post-title{font-size:1.25rem;margin-top:0}.ablog__collection .ablog-post .ablog-post-title a,.postlist .ablog-post .ablog-post-title a{font-weight:700}.ablog__collection .ablog-post .ablog-post-expand,.postlist .ablog-post .ablog-post-expand{margin-bottom:.5rem}.docutils.container{margin-left:unset;margin-right:unset;max-width:unset;padding-left:unset;padding-right:unset;width:unset}div.highlight button.copybtn{align-items:center;background-color:unset;background-color:var(--pst-color-surface);border:none;display:flex;justify-content:center}div.highlight button.copybtn:not(.success){color:var(--pst-color-muted)}div.highlight button.copybtn:hover:not(.success){color:var(--pst-color-text)}div.highlight button.copybtn.o-tooltip--left:after{background-color:var(--pst-color-surface);color:var(--pst-color-text)}#ethical-ad-placement .ethical-footer a,#ethical-ad-placement .ethical-footer a:active,#ethical-ad-placement .ethical-footer a:hover,#ethical-ad-placement .ethical-footer a:visited,#ethical-ad-placement .ethical-sidebar a,#ethical-ad-placement .ethical-sidebar a:active,#ethical-ad-placement .ethical-sidebar a:hover,#ethical-ad-placement .ethical-sidebar a:visited{color:var(--pst-color-text-base)}#ethical-ad-placement .ethical-footer,#ethical-ad-placement .ethical-sidebar{background-color:var(--pst-color-background);border:1px solid var(--pst-color-border);border-radius:5px;color:var(--pst-color-text-base);font-size:14px;line-height:20px}.bd-content div.jupyter_container{background-color:unset;border:none;box-shadow:none}.bd-content div.jupyter_container div.highlight,.bd-content div.jupyter_container div.output{border-radius:.25rem}.bd-content div.jupyter_container div.highlight{background-color:var(--pst-color-surface)}.bd-content div.jupyter_container .cell_input,.bd-content div.jupyter_container .cell_output{border-radius:.25rem}.bd-content div.jupyter_container .cell_input pre,.bd-content div.jupyter_container .cell_output pre{padding:1rem}.xr-wrap[hidden]{display:block!important}@use "../variables/color" as 
*;html[data-theme=light]{--pst-color-primary:#459db9;--pst-color-primary-text:#fff;--pst-color-primary-highlight:#306e81;--sd-color-primary:var(--pst-color-primary);--sd-color-primary-text:var(--pst-color-primary-text);--sd-color-primary-highlight:var(--pst-color-primary-highlight);--pst-color-secondary:#ee9040;--pst-color-secondary-text:#fff;--pst-color-secondary-highlight:#cf6912;--sd-color-secondary:var(--pst-color-secondary);--sd-color-secondary-text:var(--pst-color-secondary-text);--sd-color-secondary-highlight:var(--pst-color-secondary-highlight);--pst-color-success:#28a745;--pst-color-success-text:#fff;--pst-color-success-highlight:#19692c;--sd-color-success:var(--pst-color-success);--sd-color-success-text:var(--pst-color-success-text);--sd-color-success-highlight:var(--pst-color-success-highlight);--pst-color-info:#459db9;--pst-color-info-text:#fff;--pst-color-info-highlight:#306e81;--sd-color-info:var(--pst-color-info);--sd-color-info-text:var(--pst-color-info-text);--sd-color-info-highlight:var(--pst-color-info-highlight);--pst-color-warning:#ee9040;--pst-color-warning-text:#fff;--pst-color-warning-highlight:#cf6912;--sd-color-warning:var(--pst-color-warning);--sd-color-warning-text:var(--pst-color-warning-text);--sd-color-warning-highlight:var(--pst-color-warning-highlight);--pst-color-danger:#dc3545;--pst-color-danger-text:#fff;--pst-color-danger-highlight:#a71d2a;--sd-color-danger:var(--pst-color-danger);--sd-color-danger-text:var(--pst-color-danger-text);--sd-color-danger-highlight:var(--pst-color-danger-highlight);--pst-color-light:#c9c9c9;--pst-color-light-text:#000;--pst-color-light-highlight:#a3a3a3;--sd-color-light:var(--pst-color-light);--sd-color-light-text:var(--pst-color-light-text);--sd-color-light-highlight:var(--pst-color-light-highlight);--pst-color-muted:#646464;--pst-color-muted-text:#fff;--pst-color-muted-highlight:#3e3e3e;--sd-color-muted:var(--pst-color-muted);--sd-color-muted-text:var(--pst-color-muted-text);--sd-color-muted-highlight:var(--pst-color-muted-highlight);--pst-color-dark:#323232;--pst-color-dark-text:#fff;--pst-color-dark-highlight:#0c0c0c;--sd-color-dark:var(--pst-color-dark);--sd-color-dark-text:var(--pst-color-dark-text);--sd-color-dark-highlight:var(--pst-color-dark-highlight);--pst-color-black:#000;--pst-color-black-text:#fff;--pst-color-black-highlight:#000;--sd-color-black:var(--pst-color-black);--sd-color-black-text:var(--pst-color-black-text);--sd-color-black-highlight:var(--pst-color-black-highlight);--pst-color-white:#fff;--pst-color-white-text:#000;--pst-color-white-highlight:#d9d9d9;--sd-color-white:var(--pst-color-white);--sd-color-white-text:var(--pst-color-white-text);--sd-color-white-highlight:var(--pst-color-white-highlight)}html[data-theme=dark]{--pst-color-primary:#459db9;--pst-color-primary-text:#fff;--pst-color-primary-highlight:#306e81;--sd-color-primary:var(--pst-color-primary);--sd-color-primary-text:var(--pst-color-primary-text);--sd-color-primary-highlight:var(--pst-color-primary-highlight);--pst-color-secondary:#ee9040;--pst-color-secondary-text:#fff;--pst-color-secondary-highlight:#cf6912;--sd-color-secondary:var(--pst-color-secondary);--sd-color-secondary-text:var(--pst-color-secondary-text);--sd-color-secondary-highlight:var(--pst-color-secondary-highlight);--pst-color-success:#488757;--pst-color-success-text:#fff;--pst-color-success-highlight:#2d5537;--sd-color-success:var(--pst-color-success);--sd-color-success-text:var(--pst-color-success-text);--sd-color-success-highlight:var(--pst-color-success-highlight);
--pst-color-info:#459db9;--pst-color-info-text:#fff;--pst-color-info-highlight:#306e81;--sd-color-info:var(--pst-color-info);--sd-color-info-text:var(--pst-color-info-text);--sd-color-info-highlight:var(--pst-color-info-highlight);--pst-color-warning:#ee9040;--pst-color-warning-text:#fff;--pst-color-warning-highlight:#cf6912;--sd-color-warning:var(--pst-color-warning);--sd-color-warning-text:var(--pst-color-warning-text);--sd-color-warning-highlight:var(--pst-color-warning-highlight);--pst-color-danger:#cb4653;--pst-color-danger-text:#fff;--pst-color-danger-highlight:#992b36;--sd-color-danger:var(--pst-color-danger);--sd-color-danger-text:var(--pst-color-danger-text);--sd-color-danger-highlight:var(--pst-color-danger-highlight);--pst-color-light:#c9c9c9;--pst-color-light-text:#000;--pst-color-light-highlight:#a3a3a3;--sd-color-light:var(--pst-color-light);--sd-color-light-text:var(--pst-color-light-text);--sd-color-light-highlight:var(--pst-color-light-highlight);--pst-color-muted:#a6a6a6;--pst-color-muted-text:#fff;--pst-color-muted-highlight:gray;--sd-color-muted:var(--pst-color-muted);--sd-color-muted-text:var(--pst-color-muted-text);--sd-color-muted-highlight:var(--pst-color-muted-highlight);--pst-color-dark:#cecece;--pst-color-dark-text:#000;--pst-color-dark-highlight:#a8a8a8;--sd-color-dark:var(--pst-color-dark);--sd-color-dark-text:var(--pst-color-dark-text);--sd-color-dark-highlight:var(--pst-color-dark-highlight);--pst-color-black:#000;--pst-color-black-text:#fff;--pst-color-black-highlight:#000;--sd-color-black:var(--pst-color-black);--sd-color-black-text:var(--pst-color-black-text);--sd-color-black-highlight:var(--pst-color-black-highlight);--pst-color-white:#fff;--pst-color-white-text:#000;--pst-color-white-highlight:#d9d9d9;--sd-color-white:var(--pst-color-white);--sd-color-white-text:var(--pst-color-white-text);--sd-color-white-highlight:var(--pst-color-white-highlight)}html[data-theme=dark],html[data-theme=light]{--sd-color-card-border:var(--pst-color-border)}html[data-theme=light] .sd-shadow-lg,html[data-theme=light] .sd-shadow-md,html[data-theme=light] .sd-shadow-sm,html[data-theme=light] .sd-shadow-xs{box-shadow:0 .2rem .5rem var(--pst-color-shadow),0 0 .0625rem var(--pst-color-shadow)!important}.bd-content .sd-card{border:1px solid var(--pst-color-border)}.bd-content .sd-card .sd-card-header{background-color:var(--pst-color-panel-background);border-bottom:1px solid var(--pst-color-border)}.bd-content .sd-card .sd-card-footer{border-top:1px solid var(--pst-color-border)}.bd-content .sd-card .sd-card-body,.bd-content .sd-card .sd-card-footer{background-color:var(--pst-color-panel-background)}.bd-content .sd-tab-set>input:checked+label,.bd-content .sd-tab-set>input:not(:checked)+label:hover{border-color:var(--pst-color-primary);color:var(--pst-color-primary)}.bd-content .sd-tab-set>input:not(:checked)+label:hover{opacity:.5}.bd-content .sd-tab-set>label{color:var(--pst-color-text-muted)}html .bd-content .sd-tab-set>label:hover{border-color:var(--pst-color-primary);color:var(--pst-color-primary);opacity:.5}details.sd-dropdown{border:0!important;box-shadow:0 .2rem .5rem var(--pst-color-shadow),0 0 .0625rem var(--pst-color-shadow)!important}details.sd-dropdown summary.sd-card-header{border:0!important}details.sd-dropdown summary.sd-card-header+div.sd-summary-content{border:0}details.sd-dropdown summary.sd-card-header{align-items:center;background-color:unset!important;border-left:.2rem solid 
var(--pst-sd-dropdown-color)!important;color:var(--pst-color-text)!important;display:flex;font-weight:600;padding-bottom:.5rem;padding-top:.5rem;position:relative}details.sd-dropdown summary.sd-card-header,details.sd-dropdown summary.sd-card-header+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-card-border)}details.sd-dropdown summary.sd-card-header.sd-bg-primary,details.sd-dropdown summary.sd-card-header.sd-bg-primary+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-primary)}details.sd-dropdown summary.sd-card-header.sd-bg-secondary,details.sd-dropdown summary.sd-card-header.sd-bg-secondary+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-secondary)}details.sd-dropdown summary.sd-card-header.sd-bg-success,details.sd-dropdown summary.sd-card-header.sd-bg-success+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-success)}details.sd-dropdown summary.sd-card-header.sd-bg-info,details.sd-dropdown summary.sd-card-header.sd-bg-info+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-info)}details.sd-dropdown summary.sd-card-header.sd-bg-warning,details.sd-dropdown summary.sd-card-header.sd-bg-warning+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-warning)}details.sd-dropdown summary.sd-card-header.sd-bg-danger,details.sd-dropdown summary.sd-card-header.sd-bg-danger+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-danger)}details.sd-dropdown summary.sd-card-header.sd-bg-light,details.sd-dropdown summary.sd-card-header.sd-bg-light+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-light)}details.sd-dropdown summary.sd-card-header.sd-bg-muted,details.sd-dropdown summary.sd-card-header.sd-bg-muted+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-muted)}details.sd-dropdown summary.sd-card-header.sd-bg-dark,details.sd-dropdown summary.sd-card-header.sd-bg-dark+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-dark)}details.sd-dropdown summary.sd-card-header.sd-bg-black,details.sd-dropdown summary.sd-card-header.sd-bg-black+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-black)}details.sd-dropdown summary.sd-card-header.sd-bg-white,details.sd-dropdown summary.sd-card-header.sd-bg-white+div.sd-summary-content{--pst-sd-dropdown-color:var(--sd-color-white)}details.sd-dropdown summary.sd-card-header:before{background-color:var(--pst-sd-dropdown-color);content:"";height:100%;left:0;opacity:.1;pointer-events:none;position:absolute;top:0;width:100%}details.sd-dropdown summary.sd-card-header+div.sd-summary-content{border-bottom-left-radius:calc(.25rem - 1px);border-left:.2rem solid var(--pst-sd-dropdown-color)!important}details.sd-dropdown summary.sd-card-header span.sd-summary-icon{align-items:center;color:var(--pst-sd-dropdown-color)!important;display:inline-flex}details.sd-dropdown summary.sd-card-header .sd-summary-down,details.sd-dropdown summary.sd-card-header .sd-summary-up{top:.7rem}.bd-content .admonition button.toggle-button{color:inherit}.bd-content details.toggle-details summary{border-left:3px solid var(--pst-color-primary)}html div.rendered_html html .jp-RenderedHTMLCommon table{table-layout:auto}html[data-theme=dark] .bd-content .nboutput .output_area.rendered_html{background-color:var(--pst-color-text-base);border-radius:.25rem;color:var(--pst-color-on-background);padding:.5rem}html[data-theme=dark] .bd-content .nboutput .output_area.stderr{background:var(--pst-color-danger)}div.nblast.container{margin-bottom:1rem}div.cell_output 
.output{max-width:100%;overflow-x:auto}div.cell_output .output::-webkit-scrollbar{height:.5rem;width:.5rem}div.cell_output .output::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted);border-radius:.25rem}div.cell_output .output::-webkit-scrollbar-track{background:transparent}div.cell_output .output::-webkit-scrollbar-thumb{background:var(--pst-color-on-surface)}div.cell_output .output::-webkit-scrollbar-thumb:hover,div.cell_output .output:hover::-webkit-scrollbar-thumb{background:var(--pst-color-text-muted)}html[data-theme=dark] .bd-content div.cell_output .text_html,html[data-theme=dark] .bd-content div.cell_output img{background-color:var(--pst-color-text-base);border-radius:.25rem;color:var(--pst-color-on-background);padding:.5rem}.bd-content div.cell_input{display:flex;flex-direction:column;justify-content:stretch}.bd-content div.cell_input,.bd-content div.output{border-radius:.25rem}.bd-content div.output table{table-layout:auto}html[data-theme=dark] .bd-content img.leaflet-tile.leaflet-tile-loaded{border-radius:0;padding:0}.bd-search-container div#search-results>h2{font-size:var(--pst-font-size-icon);margin-top:0}.bd-search-container div#search-results p.search-summary{color:var(--pst-color-text-muted)}.bd-search-container ul.search{list-style:none;margin:0}.bd-search-container ul.search li{background-image:none;border-top:1px solid var(--pst-color-text-muted);margin:1rem 0;padding:1rem 0}.bd-search-container ul.search li>a{font-size:1.2em}.bd-search-container ul.search li div.context,.bd-search-container ul.search li p.context{color:var(--pst-color-text-base);margin:.5em 0 0}.bd-search-container ul.search li div.context a:before,.bd-search-container ul.search li p.context a:before{color:var(--pst-color-text-muted);content:"#";padding-right:.2em} \ No newline at end of file diff --git a/_static/styles/theme.css b/_static/styles/theme.css deleted file mode 100644 index 4519dd912..000000000 --- a/_static/styles/theme.css +++ /dev/null @@ -1,2 +0,0 @@ -/* Provided by Sphinx's 'basic' theme, and included in the final set of assets */ -@import "../basic.css"; diff --git a/_static/vendor/fontawesome/6.1.2/LICENSE.txt b/_static/vendor/fontawesome/6.1.2/LICENSE.txt deleted file mode 100644 index cc557ece4..000000000 --- a/_static/vendor/fontawesome/6.1.2/LICENSE.txt +++ /dev/null @@ -1,165 +0,0 @@ -Fonticons, Inc. (https://fontawesome.com) - --------------------------------------------------------------------------------- - -Font Awesome Free License - -Font Awesome Free is free, open source, and GPL friendly. You can use it for -commercial projects, open source projects, or really almost whatever you want. -Full Font Awesome Free license: https://fontawesome.com/license/free. - --------------------------------------------------------------------------------- - -# Icons: CC BY 4.0 License (https://creativecommons.org/licenses/by/4.0/) - -The Font Awesome Free download is licensed under a Creative Commons -Attribution 4.0 International License and applies to all icons packaged -as SVG and JS file types. - --------------------------------------------------------------------------------- - -# Fonts: SIL OFL 1.1 License - -In the Font Awesome Free download, the SIL OFL license applies to all icons -packaged as web and desktop font files. - -Copyright (c) 2022 Fonticons, Inc. (https://fontawesome.com) -with Reserved Font Name: "Font Awesome". - -This Font Software is licensed under the SIL Open Font License, Version 1.1. 
-This license is copied below, and is also available with a FAQ at: -http://scripts.sil.org/OFL - -SIL OPEN FONT LICENSE -Version 1.1 - 26 February 2007 - -PREAMBLE -The goals of the Open Font License (OFL) are to stimulate worldwide -development of collaborative font projects, to support the font creation -efforts of academic and linguistic communities, and to provide a free and -open framework in which fonts may be shared and improved in partnership -with others. - -The OFL allows the licensed fonts to be used, studied, modified and -redistributed freely as long as they are not sold by themselves. The -fonts, including any derivative works, can be bundled, embedded, -redistributed and/or sold with any software provided that any reserved -names are not used by derivative works. The fonts and derivatives, -however, cannot be released under any other type of license. The -requirement for fonts to remain under this license does not apply -to any document created using the fonts or their derivatives. - -DEFINITIONS -"Font Software" refers to the set of files released by the Copyright -Holder(s) under this license and clearly marked as such. This may -include source files, build scripts and documentation. - -"Reserved Font Name" refers to any names specified as such after the -copyright statement(s). - -"Original Version" refers to the collection of Font Software components as -distributed by the Copyright Holder(s). - -"Modified Version" refers to any derivative made by adding to, deleting, -or substituting — in part or in whole — any of the components of the -Original Version, by changing formats or by porting the Font Software to a -new environment. - -"Author" refers to any designer, engineer, programmer, technical -writer or other person who contributed to the Font Software. - -PERMISSION & CONDITIONS -Permission is hereby granted, free of charge, to any person obtaining -a copy of the Font Software, to use, study, copy, merge, embed, modify, -redistribute, and sell modified and unmodified copies of the Font -Software, subject to the following conditions: - -1) Neither the Font Software nor any of its individual components, -in Original or Modified Versions, may be sold by itself. - -2) Original or Modified Versions of the Font Software may be bundled, -redistributed and/or sold with any software, provided that each copy -contains the above copyright notice and this license. These can be -included either as stand-alone text files, human-readable headers or -in the appropriate machine-readable metadata fields within text or -binary files as long as those fields can be easily viewed by the user. - -3) No Modified Version of the Font Software may use the Reserved Font -Name(s) unless explicit written permission is granted by the corresponding -Copyright Holder. This restriction only applies to the primary font name as -presented to the users. - -4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font -Software shall not be used to promote, endorse or advertise any -Modified Version, except to acknowledge the contribution(s) of the -Copyright Holder(s) and the Author(s) or with their explicit written -permission. - -5) The Font Software, modified or unmodified, in part or in whole, -must be distributed entirely under this license, and must not be -distributed under any other license. The requirement for fonts to -remain under this license does not apply to any document created -using the Font Software. 
- -TERMINATION -This license becomes null and void if any of the above conditions are -not met. - -DISCLAIMER -THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT -OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL -DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM -OTHER DEALINGS IN THE FONT SOFTWARE. - --------------------------------------------------------------------------------- - -# Code: MIT License (https://opensource.org/licenses/MIT) - -In the Font Awesome Free download, the MIT license applies to all non-font and -non-icon files. - -Copyright 2022 Fonticons, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in the -Software without restriction, including without limitation the rights to use, copy, -modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, subject to the -following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -# Attribution - -Attribution is required by MIT, SIL OFL, and CC BY licenses. Downloaded Font -Awesome Free files already contain embedded comments with sufficient -attribution, so you shouldn't need to do anything additional when using these -files normally. - -We've kept attribution comments terse, so we ask that you do not actively work -to remove them from files, especially code. They're a great way for folks to -learn about Font Awesome. - --------------------------------------------------------------------------------- - -# Brand Icons - -All brand icons are trademarks of their respective owners. The use of these -trademarks does not indicate endorsement of the trademark holder by Font -Awesome, nor vice versa. **Please do not use brand logos for any purpose except -to represent the company, product, or service to which they refer.** diff --git a/_static/vendor/fontawesome/6.1.2/css/all.min.css b/_static/vendor/fontawesome/6.1.2/css/all.min.css deleted file mode 100644 index b9a01fbb7..000000000 --- a/_static/vendor/fontawesome/6.1.2/css/all.min.css +++ /dev/null @@ -1,6 +0,0 @@ -/*! - * Font Awesome Free 6.1.2 by @fontawesome - https://fontawesome.com - * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) - * Copyright 2022 Fonticons, Inc. 
- */ -.fa{font-family:var(--fa-style-family,"Font Awesome 6 Free");font-weight:var(--fa-style,900)}.fa,.fa-brands,.fa-duotone,.fa-light,.fa-regular,.fa-solid,.fa-thin,.fab,.fad,.fal,.far,.fas,.fat{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;display:var(--fa-display,inline-block);font-style:normal;font-variant:normal;line-height:1;text-rendering:auto}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-2xs{font-size:.625em;line-height:.1em;vertical-align:.225em}.fa-xs{font-size:.75em;line-height:.08333em;vertical-align:.125em}.fa-sm{font-size:.875em;line-height:.07143em;vertical-align:.05357em}.fa-lg{font-size:1.25em;line-height:.05em;vertical-align:-.075em}.fa-xl{font-size:1.5em;line-height:.04167em;vertical-align:-.125em}.fa-2xl{font-size:2em;line-height:.03125em;vertical-align:-.1875em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:var(--fa-li-margin,2.5em);padding-left:0}.fa-ul>li{position:relative}.fa-li{left:calc(var(--fa-li-width, 2em)*-1);position:absolute;text-align:center;width:var(--fa-li-width,2em);line-height:inherit}.fa-border{border-radius:var(--fa-border-radius,.1em);border:var(--fa-border-width,.08em) var(--fa-border-style,solid) var(--fa-border-color,#eee);padding:var(--fa-border-padding,.2em .25em .15em)}.fa-pull-left{float:left;margin-right:var(--fa-pull-margin,.3em)}.fa-pull-right{float:right;margin-left:var(--fa-pull-margin,.3em)}.fa-beat{-webkit-animation-name:fa-beat;animation-name:fa-beat;-webkit-animation-delay:var(--fa-animation-delay,0);animation-delay:var(--fa-animation-delay,0);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-bounce{-webkit-animation-name:fa-bounce;animation-name:fa-bounce;-webkit-animation-delay:var(--fa-animation-delay,0);animation-delay:var(--fa-animation-delay,0);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.28,.84,.42,1))}.fa-fade{-webkit-animation-name:fa-fade;animation-name:fa-fade;-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-beat-fade,.fa-fade{-webkit-animation-delay:var(--fa-animation-delay,0);animation-delay:var(--fa-animation-delay,0);-webkit-animation-direction:var(--fa-animation-direction,normal);animatio
n-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s)}.fa-beat-fade{-webkit-animation-name:fa-beat-fade;animation-name:fa-beat-fade;-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1));animation-timing-function:var(--fa-animation-timing,cubic-bezier(.4,0,.6,1))}.fa-flip{-webkit-animation-name:fa-flip;animation-name:fa-flip;-webkit-animation-delay:var(--fa-animation-delay,0);animation-delay:var(--fa-animation-delay,0);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,ease-in-out);animation-timing-function:var(--fa-animation-timing,ease-in-out)}.fa-shake{-webkit-animation-name:fa-shake;animation-name:fa-shake;-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-shake,.fa-spin{-webkit-animation-delay:var(--fa-animation-delay,0);animation-delay:var(--fa-animation-delay,0);-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal)}.fa-spin{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-duration:var(--fa-animation-duration,2s);animation-duration:var(--fa-animation-duration,2s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,linear);animation-timing-function:var(--fa-animation-timing,linear)}.fa-spin-reverse{--fa-animation-direction:reverse}.fa-pulse,.fa-spin-pulse{-webkit-animation-name:fa-spin;animation-name:fa-spin;-webkit-animation-direction:var(--fa-animation-direction,normal);animation-direction:var(--fa-animation-direction,normal);-webkit-animation-duration:var(--fa-animation-duration,1s);animation-duration:var(--fa-animation-duration,1s);-webkit-animation-iteration-count:var(--fa-animation-iteration-count,infinite);animation-iteration-count:var(--fa-animation-iteration-count,infinite);-webkit-animation-timing-function:var(--fa-animation-timing,steps(8));animation-timing-function:var(--fa-animation-timing,steps(8))}@media (prefers-reduced-motion:reduce){.fa-beat,.fa-beat-fade,.fa-bounce,.fa-fade,.fa-flip,.fa-pulse,.fa-shake,.fa-spin,.fa-spin-pulse{-webkit-animation-delay:-1ms;animation-delay:-1ms;-webkit-animation-duration:1ms;animation-duration:1ms;-webkit-animation-iteration-count:1;animation-iteration-count:1;transition-delay:0s;transition-duration:0s}}@-webkit-keyframes fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@keyframes 
fa-beat{0%,90%{-webkit-transform:scale(1);transform:scale(1)}45%{-webkit-transform:scale(var(--fa-beat-scale,1.25));transform:scale(var(--fa-beat-scale,1.25))}}@-webkit-keyframes fa-bounce{0%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}to{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}}@keyframes fa-bounce{0%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}10%{-webkit-transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0);transform:scale(var(--fa-bounce-start-scale-x,1.1),var(--fa-bounce-start-scale-y,.9)) translateY(0)}30%{-webkit-transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em));transform:scale(var(--fa-bounce-jump-scale-x,.9),var(--fa-bounce-jump-scale-y,1.1)) translateY(var(--fa-bounce-height,-.5em))}50%{-webkit-transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0);transform:scale(var(--fa-bounce-land-scale-x,1.05),var(--fa-bounce-land-scale-y,.95)) translateY(0)}57%{-webkit-transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em));transform:scale(1) translateY(var(--fa-bounce-rebound,-.125em))}64%{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}to{-webkit-transform:scale(1) translateY(0);transform:scale(1) translateY(0)}}@-webkit-keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@keyframes fa-fade{50%{opacity:var(--fa-fade-opacity,.4)}}@-webkit-keyframes fa-beat-fade{0%,to{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@keyframes fa-beat-fade{0%,to{opacity:var(--fa-beat-fade-opacity,.4);-webkit-transform:scale(1);transform:scale(1)}50%{opacity:1;-webkit-transform:scale(var(--fa-beat-fade-scale,1.125));transform:scale(var(--fa-beat-fade-scale,1.125))}}@-webkit-keyframes fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@keyframes fa-flip{50%{-webkit-transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg));transform:rotate3d(var(--fa-flip-x,0),var(--fa-flip-y,1),var(--fa-flip-z,0),var(--fa-flip-angle,-180deg))}}@-webkit-keyframes 
fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}8%,24%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}40%,to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}@keyframes fa-shake{0%{-webkit-transform:rotate(-15deg);transform:rotate(-15deg)}4%{-webkit-transform:rotate(15deg);transform:rotate(15deg)}8%,24%{-webkit-transform:rotate(-18deg);transform:rotate(-18deg)}12%,28%{-webkit-transform:rotate(18deg);transform:rotate(18deg)}16%{-webkit-transform:rotate(-22deg);transform:rotate(-22deg)}20%{-webkit-transform:rotate(22deg);transform:rotate(22deg)}32%{-webkit-transform:rotate(-12deg);transform:rotate(-12deg)}36%{-webkit-transform:rotate(12deg);transform:rotate(12deg)}40%,to{-webkit-transform:rotate(0deg);transform:rotate(0deg)}}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.fa-rotate-90{-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-webkit-transform:scaleY(-1);transform:scaleY(-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1);transform:scale(-1)}.fa-rotate-by{-webkit-transform:rotate(var(--fa-rotate-angle,none));transform:rotate(var(--fa-rotate-angle,none))}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%;z-index:var(--fa-stack-z-index,auto)}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:var(--fa-inverse,#fff)}.fa-0:before{content:"\30"}.fa-1:before{content:"\31"}.fa-2:before{content:"\32"}.fa-3:before{content:"\33"}.fa-4:before{content:"\34"}.fa-5:before{content:"\35"}.fa-6:before{content:"\36"}.fa-7:before{content:"\37"}.fa-8:before{content:"\38"}.fa-9:before{content:"\39"}.fa-a:before{content:"\41"}.fa-address-book:before,.fa-contact-book:before{content:"\f2b9"}.fa-address-card:before,.fa-contact-card:before,.fa-vcard:before{content:"\f2bb"}.fa-align-center:before{content:"\f037"}.fa-align-justify:before{content:"\f039"}.fa-align-left:before{content:"\f036"}.fa-align-right:before{content:"\f038"}.fa-anchor:before{content:"\f13d"}.fa-anchor-circle-check:before{content:"\e4aa"}.fa-anchor-circle-exclamation:before{content:"\e4ab"}.fa-anchor-circle-xmark:before{content:"\e4ac"}.fa-anchor-lock:before{content:"\e4ad"}.fa-angle-down:before{content:"\f107"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-double-down:before,.fa-angles-down:before{content:"\f103"}.fa-angle-double-left:before,.fa-angles-left:before{content:"\f100"}.fa-angle-double-right:before,.fa-angles-right:before{content:"\f101"}.fa-angle-double-up:before,.fa-angles-up:before{content:"\f102"}
.fa-ankh:before{content:"\f644"}.fa-apple-alt:before,.fa-apple-whole:before{content:"\f5d1"}.fa-archway:before{content:"\f557"}.fa-arrow-down:before{content:"\f063"}.fa-arrow-down-1-9:before,.fa-sort-numeric-asc:before,.fa-sort-numeric-down:before{content:"\f162"}.fa-arrow-down-9-1:before,.fa-sort-numeric-desc:before,.fa-sort-numeric-down-alt:before{content:"\f886"}.fa-arrow-down-a-z:before,.fa-sort-alpha-asc:before,.fa-sort-alpha-down:before{content:"\f15d"}.fa-arrow-down-long:before,.fa-long-arrow-down:before{content:"\f175"}.fa-arrow-down-short-wide:before,.fa-sort-amount-desc:before,.fa-sort-amount-down-alt:before{content:"\f884"}.fa-arrow-down-up-across-line:before{content:"\e4af"}.fa-arrow-down-up-lock:before{content:"\e4b0"}.fa-arrow-down-wide-short:before,.fa-sort-amount-asc:before,.fa-sort-amount-down:before{content:"\f160"}.fa-arrow-down-z-a:before,.fa-sort-alpha-desc:before,.fa-sort-alpha-down-alt:before{content:"\f881"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-left-long:before,.fa-long-arrow-left:before{content:"\f177"}.fa-arrow-pointer:before,.fa-mouse-pointer:before{content:"\f245"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-right-arrow-left:before,.fa-exchange:before{content:"\f0ec"}.fa-arrow-right-from-bracket:before,.fa-sign-out:before{content:"\f08b"}.fa-arrow-right-long:before,.fa-long-arrow-right:before{content:"\f178"}.fa-arrow-right-to-bracket:before,.fa-sign-in:before{content:"\f090"}.fa-arrow-right-to-city:before{content:"\e4b3"}.fa-arrow-left-rotate:before,.fa-arrow-rotate-back:before,.fa-arrow-rotate-backward:before,.fa-arrow-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-arrow-right-rotate:before,.fa-arrow-rotate-forward:before,.fa-arrow-rotate-right:before,.fa-redo:before{content:"\f01e"}.fa-arrow-trend-down:before{content:"\e097"}.fa-arrow-trend-up:before{content:"\e098"}.fa-arrow-turn-down:before,.fa-level-down:before{content:"\f149"}.fa-arrow-turn-up:before,.fa-level-up:before{content:"\f148"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-up-1-9:before,.fa-sort-numeric-up:before{content:"\f163"}.fa-arrow-up-9-1:before,.fa-sort-numeric-up-alt:before{content:"\f887"}.fa-arrow-up-a-z:before,.fa-sort-alpha-up:before{content:"\f15e"}.fa-arrow-up-from-bracket:before{content:"\e09a"}.fa-arrow-up-from-ground-water:before{content:"\e4b5"}.fa-arrow-up-from-water-pump:before{content:"\e4b6"}.fa-arrow-up-long:before,.fa-long-arrow-up:before{content:"\f176"}.fa-arrow-up-right-dots:before{content:"\e4b7"}.fa-arrow-up-right-from-square:before,.fa-external-link:before{content:"\f08e"}.fa-arrow-up-short-wide:before,.fa-sort-amount-up-alt:before{content:"\f885"}.fa-arrow-up-wide-short:before,.fa-sort-amount-up:before{content:"\f161"}.fa-arrow-up-z-a:before,.fa-sort-alpha-up-alt:before{content:"\f882"}.fa-arrows-down-to-line:before{content:"\e4b8"}.fa-arrows-down-to-people:before{content:"\e4b9"}.fa-arrows-h:before,.fa-arrows-left-right:before{content:"\f07e"}.fa-arrows-left-right-to-line:before{content:"\e4ba"}.fa-arrows-rotate:before,.fa-refresh:before,.fa-sync:before{content:"\f021"}.fa-arrows-spin:before{content:"\e4bb"}.fa-arrows-split-up-and-left:before{content:"\e4bc"}.fa-arrows-to-circle:before{content:"\e4bd"}.fa-arrows-to-dot:before{content:"\e4be"}.fa-arrows-to-eye:before{content:"\e4bf"}.fa-arrows-turn-right:before{content:"\e4c0"}.fa-arrows-turn-to-dots:before{content:"\e4c1"}.fa-arrows-up-down:before,.fa-arrows-v:before{content:"\f07d"}.fa-arrows-up-down-left-right:before,.fa-arrows:before{content:"\f047"}.fa-arrows-up-to-line:before{content:"\e4c
2"}.fa-asterisk:before{content:"\2a"}.fa-at:before{content:"\40"}.fa-atom:before{content:"\f5d2"}.fa-audio-description:before{content:"\f29e"}.fa-austral-sign:before{content:"\e0a9"}.fa-award:before{content:"\f559"}.fa-b:before{content:"\42"}.fa-baby:before{content:"\f77c"}.fa-baby-carriage:before,.fa-carriage-baby:before{content:"\f77d"}.fa-backward:before{content:"\f04a"}.fa-backward-fast:before,.fa-fast-backward:before{content:"\f049"}.fa-backward-step:before,.fa-step-backward:before{content:"\f048"}.fa-bacon:before{content:"\f7e5"}.fa-bacteria:before{content:"\e059"}.fa-bacterium:before{content:"\e05a"}.fa-bag-shopping:before,.fa-shopping-bag:before{content:"\f290"}.fa-bahai:before,.fa-haykal:before{content:"\f666"}.fa-baht-sign:before{content:"\e0ac"}.fa-ban:before,.fa-cancel:before{content:"\f05e"}.fa-ban-smoking:before,.fa-smoking-ban:before{content:"\f54d"}.fa-band-aid:before,.fa-bandage:before{content:"\f462"}.fa-barcode:before{content:"\f02a"}.fa-bars:before,.fa-navicon:before{content:"\f0c9"}.fa-bars-progress:before,.fa-tasks-alt:before{content:"\f828"}.fa-bars-staggered:before,.fa-reorder:before,.fa-stream:before{content:"\f550"}.fa-baseball-ball:before,.fa-baseball:before{content:"\f433"}.fa-baseball-bat-ball:before{content:"\f432"}.fa-basket-shopping:before,.fa-shopping-basket:before{content:"\f291"}.fa-basketball-ball:before,.fa-basketball:before{content:"\f434"}.fa-bath:before,.fa-bathtub:before{content:"\f2cd"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-battery-5:before,.fa-battery-full:before,.fa-battery:before{content:"\f240"}.fa-battery-3:before,.fa-battery-half:before{content:"\f242"}.fa-battery-2:before,.fa-battery-quarter:before{content:"\f243"}.fa-battery-4:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-bed:before{content:"\f236"}.fa-bed-pulse:before,.fa-procedures:before{content:"\f487"}.fa-beer-mug-empty:before,.fa-beer:before{content:"\f0fc"}.fa-bell:before{content:"\f0f3"}.fa-bell-concierge:before,.fa-concierge-bell:before{content:"\f562"}.fa-bell-slash:before{content:"\f1f6"}.fa-bezier-curve:before{content:"\f55b"}.fa-bicycle:before{content:"\f206"}.fa-binoculars:before{content:"\f1e5"}.fa-biohazard:before{content:"\f780"}.fa-bitcoin-sign:before{content:"\e0b4"}.fa-blender:before{content:"\f517"}.fa-blender-phone:before{content:"\f6b6"}.fa-blog:before{content:"\f781"}.fa-bold:before{content:"\f032"}.fa-bolt:before,.fa-zap:before{content:"\f0e7"}.fa-bolt-lightning:before{content:"\e0b7"}.fa-bomb:before{content:"\f1e2"}.fa-bone:before{content:"\f5d7"}.fa-bong:before{content:"\f55c"}.fa-book:before{content:"\f02d"}.fa-atlas:before,.fa-book-atlas:before{content:"\f558"}.fa-bible:before,.fa-book-bible:before{content:"\f647"}.fa-book-bookmark:before{content:"\e0bb"}.fa-book-journal-whills:before,.fa-journal-whills:before{content:"\f66a"}.fa-book-medical:before{content:"\f7e6"}.fa-book-open:before{content:"\f518"}.fa-book-open-reader:before,.fa-book-reader:before{content:"\f5da"}.fa-book-quran:before,.fa-quran:before{content:"\f687"}.fa-book-dead:before,.fa-book-skull:before{content:"\f6b7"}.fa-book-tanakh:before,.fa-tanakh:before{content:"\f827"}.fa-bookmark:before{content:"\f02e"}.fa-border-all:before{content:"\f84c"}.fa-border-none:before{content:"\f850"}.fa-border-style:before,.fa-border-top-left:before{content:"\f853"}.fa-bore-hole:before{content:"\e4c3"}.fa-bottle-droplet:before{content:"\e4c4"}.fa-bottle-water:before{content:"\e4c5"}.fa-bowl-food:before{content:"\e4c6"}.fa-bowl-rice:before{content:"\e2eb"}.fa-bowling-ball:befo
re{content:"\f436"}.fa-box:before{content:"\f466"}.fa-archive:before,.fa-box-archive:before{content:"\f187"}.fa-box-open:before{content:"\f49e"}.fa-box-tissue:before{content:"\e05b"}.fa-boxes-packing:before{content:"\e4c7"}.fa-boxes-alt:before,.fa-boxes-stacked:before,.fa-boxes:before{content:"\f468"}.fa-braille:before{content:"\f2a1"}.fa-brain:before{content:"\f5dc"}.fa-brazilian-real-sign:before{content:"\e46c"}.fa-bread-slice:before{content:"\f7ec"}.fa-bridge:before{content:"\e4c8"}.fa-bridge-circle-check:before{content:"\e4c9"}.fa-bridge-circle-exclamation:before{content:"\e4ca"}.fa-bridge-circle-xmark:before{content:"\e4cb"}.fa-bridge-lock:before{content:"\e4cc"}.fa-bridge-water:before{content:"\e4ce"}.fa-briefcase:before{content:"\f0b1"}.fa-briefcase-medical:before{content:"\f469"}.fa-broom:before{content:"\f51a"}.fa-broom-ball:before,.fa-quidditch-broom-ball:before,.fa-quidditch:before{content:"\f458"}.fa-brush:before{content:"\f55d"}.fa-bucket:before{content:"\e4cf"}.fa-bug:before{content:"\f188"}.fa-bug-slash:before{content:"\e490"}.fa-bugs:before{content:"\e4d0"}.fa-building:before{content:"\f1ad"}.fa-building-circle-arrow-right:before{content:"\e4d1"}.fa-building-circle-check:before{content:"\e4d2"}.fa-building-circle-exclamation:before{content:"\e4d3"}.fa-building-circle-xmark:before{content:"\e4d4"}.fa-bank:before,.fa-building-columns:before,.fa-institution:before,.fa-museum:before,.fa-university:before{content:"\f19c"}.fa-building-flag:before{content:"\e4d5"}.fa-building-lock:before{content:"\e4d6"}.fa-building-ngo:before{content:"\e4d7"}.fa-building-shield:before{content:"\e4d8"}.fa-building-un:before{content:"\e4d9"}.fa-building-user:before{content:"\e4da"}.fa-building-wheat:before{content:"\e4db"}.fa-bullhorn:before{content:"\f0a1"}.fa-bullseye:before{content:"\f140"}.fa-burger:before,.fa-hamburger:before{content:"\f805"}.fa-burst:before{content:"\e4dc"}.fa-bus:before{content:"\f207"}.fa-bus-alt:before,.fa-bus-simple:before{content:"\f55e"}.fa-briefcase-clock:before,.fa-business-time:before{content:"\f64a"}.fa-c:before{content:"\43"}.fa-cable-car:before,.fa-tram:before{content:"\f7da"}.fa-birthday-cake:before,.fa-cake-candles:before,.fa-cake:before{content:"\f1fd"}.fa-calculator:before{content:"\f1ec"}.fa-calendar:before{content:"\f133"}.fa-calendar-check:before{content:"\f274"}.fa-calendar-day:before{content:"\f783"}.fa-calendar-alt:before,.fa-calendar-days:before{content:"\f073"}.fa-calendar-minus:before{content:"\f272"}.fa-calendar-plus:before{content:"\f271"}.fa-calendar-week:before{content:"\f784"}.fa-calendar-times:before,.fa-calendar-xmark:before{content:"\f273"}.fa-camera-alt:before,.fa-camera:before{content:"\f030"}.fa-camera-retro:before{content:"\f083"}.fa-camera-rotate:before{content:"\e0d8"}.fa-campground:before{content:"\f6bb"}.fa-candy-cane:before{content:"\f786"}.fa-cannabis:before{content:"\f55f"}.fa-capsules:before{content:"\f46b"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-battery-car:before,.fa-car-battery:before{content:"\f5df"}.fa-car-burst:before,.fa-car-crash:before{content:"\f5e1"}.fa-car-on:before{content:"\e4dd"}.fa-car-alt:before,.fa-car-rear:before{content:"\f5de"}.fa-car-side:before{content:"\f5e4"}.fa-car-tunnel:before{content:"\e4de"}.fa-caravan:before{content:"\f8ff"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-caret-up:before{content:"\f0d8"}.fa-carrot:before{content:"\f787"}.fa-cart-arrow-down:before{content:"\f218"}.fa-cart-flatbed:before,.fa-dolly-flat
bed:before{content:"\f474"}.fa-cart-flatbed-suitcase:before,.fa-luggage-cart:before{content:"\f59d"}.fa-cart-plus:before{content:"\f217"}.fa-cart-shopping:before,.fa-shopping-cart:before{content:"\f07a"}.fa-cash-register:before{content:"\f788"}.fa-cat:before{content:"\f6be"}.fa-cedi-sign:before{content:"\e0df"}.fa-cent-sign:before{content:"\e3f5"}.fa-certificate:before{content:"\f0a3"}.fa-chair:before{content:"\f6c0"}.fa-blackboard:before,.fa-chalkboard:before{content:"\f51b"}.fa-chalkboard-teacher:before,.fa-chalkboard-user:before{content:"\f51c"}.fa-champagne-glasses:before,.fa-glass-cheers:before{content:"\f79f"}.fa-charging-station:before{content:"\f5e7"}.fa-area-chart:before,.fa-chart-area:before{content:"\f1fe"}.fa-bar-chart:before,.fa-chart-bar:before{content:"\f080"}.fa-chart-column:before{content:"\e0e3"}.fa-chart-gantt:before{content:"\e0e4"}.fa-chart-line:before,.fa-line-chart:before{content:"\f201"}.fa-chart-pie:before,.fa-pie-chart:before{content:"\f200"}.fa-chart-simple:before{content:"\e473"}.fa-check:before{content:"\f00c"}.fa-check-double:before{content:"\f560"}.fa-check-to-slot:before,.fa-vote-yea:before{content:"\f772"}.fa-cheese:before{content:"\f7ef"}.fa-chess:before{content:"\f439"}.fa-chess-bishop:before{content:"\f43a"}.fa-chess-board:before{content:"\f43c"}.fa-chess-king:before{content:"\f43f"}.fa-chess-knight:before{content:"\f441"}.fa-chess-pawn:before{content:"\f443"}.fa-chess-queen:before{content:"\f445"}.fa-chess-rook:before{content:"\f447"}.fa-chevron-down:before{content:"\f078"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-chevron-up:before{content:"\f077"}.fa-child:before{content:"\f1ae"}.fa-child-dress:before{content:"\e59c"}.fa-child-reaching:before{content:"\e59d"}.fa-child-rifle:before{content:"\e4e0"}.fa-children:before{content:"\e4e1"}.fa-church:before{content:"\f51d"}.fa-circle:before{content:"\f111"}.fa-arrow-circle-down:before,.fa-circle-arrow-down:before{content:"\f0ab"}.fa-arrow-circle-left:before,.fa-circle-arrow-left:before{content:"\f0a8"}.fa-arrow-circle-right:before,.fa-circle-arrow-right:before{content:"\f0a9"}.fa-arrow-circle-up:before,.fa-circle-arrow-up:before{content:"\f0aa"}.fa-check-circle:before,.fa-circle-check:before{content:"\f058"}.fa-chevron-circle-down:before,.fa-circle-chevron-down:before{content:"\f13a"}.fa-chevron-circle-left:before,.fa-circle-chevron-left:before{content:"\f137"}.fa-chevron-circle-right:before,.fa-circle-chevron-right:before{content:"\f138"}.fa-chevron-circle-up:before,.fa-circle-chevron-up:before{content:"\f139"}.fa-circle-dollar-to-slot:before,.fa-donate:before{content:"\f4b9"}.fa-circle-dot:before,.fa-dot-circle:before{content:"\f192"}.fa-arrow-alt-circle-down:before,.fa-circle-down:before{content:"\f358"}.fa-circle-exclamation:before,.fa-exclamation-circle:before{content:"\f06a"}.fa-circle-h:before,.fa-hospital-symbol:before{content:"\f47e"}.fa-adjust:before,.fa-circle-half-stroke:before{content:"\f042"}.fa-circle-info:before,.fa-info-circle:before{content:"\f05a"}.fa-arrow-alt-circle-left:before,.fa-circle-left:before{content:"\f359"}.fa-circle-minus:before,.fa-minus-circle:before{content:"\f056"}.fa-circle-nodes:before{content:"\e4e2"}.fa-circle-notch:before{content:"\f1ce"}.fa-circle-pause:before,.fa-pause-circle:before{content:"\f28b"}.fa-circle-play:before,.fa-play-circle:before{content:"\f144"}.fa-circle-plus:before,.fa-plus-circle:before{content:"\f055"}.fa-circle-question:before,.fa-question-circle:before{content:"\f059"}.fa-circle-radiation:before,.fa-radiat
ion-alt:before{content:"\f7ba"}.fa-arrow-alt-circle-right:before,.fa-circle-right:before{content:"\f35a"}.fa-circle-stop:before,.fa-stop-circle:before{content:"\f28d"}.fa-arrow-alt-circle-up:before,.fa-circle-up:before{content:"\f35b"}.fa-circle-user:before,.fa-user-circle:before{content:"\f2bd"}.fa-circle-xmark:before,.fa-times-circle:before,.fa-xmark-circle:before{content:"\f057"}.fa-city:before{content:"\f64f"}.fa-clapperboard:before{content:"\e131"}.fa-clipboard:before{content:"\f328"}.fa-clipboard-check:before{content:"\f46c"}.fa-clipboard-list:before{content:"\f46d"}.fa-clipboard-question:before{content:"\e4e3"}.fa-clipboard-user:before{content:"\f7f3"}.fa-clock-four:before,.fa-clock:before{content:"\f017"}.fa-clock-rotate-left:before,.fa-history:before{content:"\f1da"}.fa-clone:before{content:"\f24d"}.fa-closed-captioning:before{content:"\f20a"}.fa-cloud:before{content:"\f0c2"}.fa-cloud-arrow-down:before,.fa-cloud-download-alt:before,.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-arrow-up:before,.fa-cloud-upload-alt:before,.fa-cloud-upload:before{content:"\f0ee"}.fa-cloud-bolt:before,.fa-thunderstorm:before{content:"\f76c"}.fa-cloud-meatball:before{content:"\f73b"}.fa-cloud-moon:before{content:"\f6c3"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-cloud-rain:before{content:"\f73d"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-cloud-showers-water:before{content:"\e4e4"}.fa-cloud-sun:before{content:"\f6c4"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-clover:before{content:"\e139"}.fa-code:before{content:"\f121"}.fa-code-branch:before{content:"\f126"}.fa-code-commit:before{content:"\f386"}.fa-code-compare:before{content:"\e13a"}.fa-code-fork:before{content:"\e13b"}.fa-code-merge:before{content:"\f387"}.fa-code-pull-request:before{content:"\e13c"}.fa-coins:before{content:"\f51e"}.fa-colon-sign:before{content:"\e140"}.fa-comment:before{content:"\f075"}.fa-comment-dollar:before{content:"\f651"}.fa-comment-dots:before,.fa-commenting:before{content:"\f4ad"}.fa-comment-medical:before{content:"\f7f5"}.fa-comment-slash:before{content:"\f4b3"}.fa-comment-sms:before,.fa-sms:before{content:"\f7cd"}.fa-comments:before{content:"\f086"}.fa-comments-dollar:before{content:"\f653"}.fa-compact-disc:before{content:"\f51f"}.fa-compass:before{content:"\f14e"}.fa-compass-drafting:before,.fa-drafting-compass:before{content:"\f568"}.fa-compress:before{content:"\f066"}.fa-computer:before{content:"\e4e5"}.fa-computer-mouse:before,.fa-mouse:before{content:"\f8cc"}.fa-cookie:before{content:"\f563"}.fa-cookie-bite:before{content:"\f564"}.fa-copy:before{content:"\f0c5"}.fa-copyright:before{content:"\f1f9"}.fa-couch:before{content:"\f4b8"}.fa-cow:before{content:"\f6c8"}.fa-credit-card-alt:before,.fa-credit-card:before{content:"\f09d"}.fa-crop:before{content:"\f125"}.fa-crop-alt:before,.fa-crop-simple:before{content:"\f565"}.fa-cross:before{content:"\f654"}.fa-crosshairs:before{content:"\f05b"}.fa-crow:before{content:"\f520"}.fa-crown:before{content:"\f521"}.fa-crutch:before{content:"\f7f7"}.fa-cruzeiro-sign:before{content:"\e152"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-cubes-stacked:before{content:"\e4e6"}.fa-d:before{content:"\44"}.fa-database:before{content:"\f1c0"}.fa-backspace:before,.fa-delete-left:before{content:"\f55a"}.fa-democrat:before{content:"\f747"}.fa-desktop-alt:before,.fa-desktop:before{content:"\f390"}.fa-dharmachakra:before{content:"\f655"}.fa-diagram-next:before{content:"\e476"}.fa-diagram-predecessor:before{content:"\e477"}.fa-diagram-project:before,.fa-proj
ect-diagram:before{content:"\f542"}.fa-diagram-successor:before{content:"\e47a"}.fa-diamond:before{content:"\f219"}.fa-diamond-turn-right:before,.fa-directions:before{content:"\f5eb"}.fa-dice:before{content:"\f522"}.fa-dice-d20:before{content:"\f6cf"}.fa-dice-d6:before{content:"\f6d1"}.fa-dice-five:before{content:"\f523"}.fa-dice-four:before{content:"\f524"}.fa-dice-one:before{content:"\f525"}.fa-dice-six:before{content:"\f526"}.fa-dice-three:before{content:"\f527"}.fa-dice-two:before{content:"\f528"}.fa-disease:before{content:"\f7fa"}.fa-display:before{content:"\e163"}.fa-divide:before{content:"\f529"}.fa-dna:before{content:"\f471"}.fa-dog:before{content:"\f6d3"}.fa-dollar-sign:before,.fa-dollar:before,.fa-usd:before{content:"\24"}.fa-dolly-box:before,.fa-dolly:before{content:"\f472"}.fa-dong-sign:before{content:"\e169"}.fa-door-closed:before{content:"\f52a"}.fa-door-open:before{content:"\f52b"}.fa-dove:before{content:"\f4ba"}.fa-compress-alt:before,.fa-down-left-and-up-right-to-center:before{content:"\f422"}.fa-down-long:before,.fa-long-arrow-alt-down:before{content:"\f309"}.fa-download:before{content:"\f019"}.fa-dragon:before{content:"\f6d5"}.fa-draw-polygon:before{content:"\f5ee"}.fa-droplet:before,.fa-tint:before{content:"\f043"}.fa-droplet-slash:before,.fa-tint-slash:before{content:"\f5c7"}.fa-drum:before{content:"\f569"}.fa-drum-steelpan:before{content:"\f56a"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-dumbbell:before{content:"\f44b"}.fa-dumpster:before{content:"\f793"}.fa-dumpster-fire:before{content:"\f794"}.fa-dungeon:before{content:"\f6d9"}.fa-e:before{content:"\45"}.fa-deaf:before,.fa-deafness:before,.fa-ear-deaf:before,.fa-hard-of-hearing:before{content:"\f2a4"}.fa-assistive-listening-systems:before,.fa-ear-listen:before{content:"\f2a2"}.fa-earth-africa:before,.fa-globe-africa:before{content:"\f57c"}.fa-earth-america:before,.fa-earth-americas:before,.fa-earth:before,.fa-globe-americas:before{content:"\f57d"}.fa-earth-asia:before,.fa-globe-asia:before{content:"\f57e"}.fa-earth-europe:before,.fa-globe-europe:before{content:"\f7a2"}.fa-earth-oceania:before,.fa-globe-oceania:before{content:"\e47b"}.fa-egg:before{content:"\f7fb"}.fa-eject:before{content:"\f052"}.fa-elevator:before{content:"\e16d"}.fa-ellipsis-h:before,.fa-ellipsis:before{content:"\f141"}.fa-ellipsis-v:before,.fa-ellipsis-vertical:before{content:"\f142"}.fa-envelope:before{content:"\f0e0"}.fa-envelope-circle-check:before{content:"\e4e8"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-text:before{content:"\f658"}.fa-envelopes-bulk:before,.fa-mail-bulk:before{content:"\f674"}.fa-equals:before{content:"\3d"}.fa-eraser:before{content:"\f12d"}.fa-ethernet:before{content:"\f796"}.fa-eur:before,.fa-euro-sign:before,.fa-euro:before{content:"\f153"}.fa-exclamation:before{content:"\21"}.fa-expand:before{content:"\f065"}.fa-explosion:before{content:"\e4e9"}.fa-eye:before{content:"\f06e"}.fa-eye-dropper-empty:before,.fa-eye-dropper:before,.fa-eyedropper:before{content:"\f1fb"}.fa-eye-low-vision:before,.fa-low-vision:before{content:"\f2a8"}.fa-eye-slash:before{content:"\f070"}.fa-f:before{content:"\46"}.fa-angry:before,.fa-face-angry:before{content:"\f556"}.fa-dizzy:before,.fa-face-dizzy:before{content:"\f567"}.fa-face-flushed:before,.fa-flushed:before{content:"\f579"}.fa-face-frown:before,.fa-frown:before{content:"\f119"}.fa-face-frown-open:before,.fa-frown-open:before{content:"\f57a"}.fa-face-grimace:before,.fa-grimace:before{content:"\f57f"}.fa-face-grin:before,.fa-grin:before{content:"\f580"}.fa-face-grin-beam:b
efore,.fa-grin-beam:before{content:"\f582"}.fa-face-grin-beam-sweat:before,.fa-grin-beam-sweat:before{content:"\f583"}.fa-face-grin-hearts:before,.fa-grin-hearts:before{content:"\f584"}.fa-face-grin-squint:before,.fa-grin-squint:before{content:"\f585"}.fa-face-grin-squint-tears:before,.fa-grin-squint-tears:before{content:"\f586"}.fa-face-grin-stars:before,.fa-grin-stars:before{content:"\f587"}.fa-face-grin-tears:before,.fa-grin-tears:before{content:"\f588"}.fa-face-grin-tongue:before,.fa-grin-tongue:before{content:"\f589"}.fa-face-grin-tongue-squint:before,.fa-grin-tongue-squint:before{content:"\f58a"}.fa-face-grin-tongue-wink:before,.fa-grin-tongue-wink:before{content:"\f58b"}.fa-face-grin-wide:before,.fa-grin-alt:before{content:"\f581"}.fa-face-grin-wink:before,.fa-grin-wink:before{content:"\f58c"}.fa-face-kiss:before,.fa-kiss:before{content:"\f596"}.fa-face-kiss-beam:before,.fa-kiss-beam:before{content:"\f597"}.fa-face-kiss-wink-heart:before,.fa-kiss-wink-heart:before{content:"\f598"}.fa-face-laugh:before,.fa-laugh:before{content:"\f599"}.fa-face-laugh-beam:before,.fa-laugh-beam:before{content:"\f59a"}.fa-face-laugh-squint:before,.fa-laugh-squint:before{content:"\f59b"}.fa-face-laugh-wink:before,.fa-laugh-wink:before{content:"\f59c"}.fa-face-meh:before,.fa-meh:before{content:"\f11a"}.fa-face-meh-blank:before,.fa-meh-blank:before{content:"\f5a4"}.fa-face-rolling-eyes:before,.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-face-sad-cry:before,.fa-sad-cry:before{content:"\f5b3"}.fa-face-sad-tear:before,.fa-sad-tear:before{content:"\f5b4"}.fa-face-smile:before,.fa-smile:before{content:"\f118"}.fa-face-smile-beam:before,.fa-smile-beam:before{content:"\f5b8"}.fa-face-smile-wink:before,.fa-smile-wink:before{content:"\f4da"}.fa-face-surprise:before,.fa-surprise:before{content:"\f5c2"}.fa-face-tired:before,.fa-tired:before{content:"\f5c8"}.fa-fan:before{content:"\f863"}.fa-faucet:before{content:"\e005"}.fa-faucet-drip:before{content:"\e006"}.fa-fax:before{content:"\f1ac"}.fa-feather:before{content:"\f52d"}.fa-feather-alt:before,.fa-feather-pointed:before{content:"\f56b"}.fa-ferry:before{content:"\e4ea"}.fa-file:before{content:"\f15b"}.fa-file-arrow-down:before,.fa-file-download:before{content:"\f56d"}.fa-file-arrow-up:before,.fa-file-upload:before{content:"\f574"}.fa-file-audio:before{content:"\f1c7"}.fa-file-circle-check:before{content:"\e5a0"}.fa-file-circle-exclamation:before{content:"\e4eb"}.fa-file-circle-minus:before{content:"\e4ed"}.fa-file-circle-plus:before{content:"\e494"}.fa-file-circle-question:before{content:"\e4ef"}.fa-file-circle-xmark:before{content:"\e5a1"}.fa-file-code:before{content:"\f1c9"}.fa-file-contract:before{content:"\f56c"}.fa-file-csv:before{content:"\f6dd"}.fa-file-excel:before{content:"\f1c3"}.fa-arrow-right-from-file:before,.fa-file-export:before{content:"\f56e"}.fa-file-image:before{content:"\f1c5"}.fa-arrow-right-to-file:before,.fa-file-import:before{content:"\f56f"}.fa-file-invoice:before{content:"\f570"}.fa-file-invoice-dollar:before{content:"\f571"}.fa-file-alt:before,.fa-file-lines:before,.fa-file-text:before{content:"\f15c"}.fa-file-medical:before{content:"\f477"}.fa-file-pdf:before{content:"\f1c1"}.fa-file-edit:before,.fa-file-pen:before{content:"\f31c"}.fa-file-powerpoint:before{content:"\f1c4"}.fa-file-prescription:before{content:"\f572"}.fa-file-shield:before{content:"\e4f0"}.fa-file-signature:before{content:"\f573"}.fa-file-video:before{content:"\f1c8"}.fa-file-medical-alt:before,.fa-file-waveform:before{content:"\f478"}.fa-file-word:before{content:"\f1c2
"}.fa-file-archive:before,.fa-file-zipper:before{content:"\f1c6"}.fa-fill:before{content:"\f575"}.fa-fill-drip:before{content:"\f576"}.fa-film:before{content:"\f008"}.fa-filter:before{content:"\f0b0"}.fa-filter-circle-dollar:before,.fa-funnel-dollar:before{content:"\f662"}.fa-filter-circle-xmark:before{content:"\e17b"}.fa-fingerprint:before{content:"\f577"}.fa-fire:before{content:"\f06d"}.fa-fire-burner:before{content:"\e4f1"}.fa-fire-extinguisher:before{content:"\f134"}.fa-fire-alt:before,.fa-fire-flame-curved:before{content:"\f7e4"}.fa-burn:before,.fa-fire-flame-simple:before{content:"\f46a"}.fa-fish:before{content:"\f578"}.fa-fish-fins:before{content:"\e4f2"}.fa-flag:before{content:"\f024"}.fa-flag-checkered:before{content:"\f11e"}.fa-flag-usa:before{content:"\f74d"}.fa-flask:before{content:"\f0c3"}.fa-flask-vial:before{content:"\e4f3"}.fa-floppy-disk:before,.fa-save:before{content:"\f0c7"}.fa-florin-sign:before{content:"\e184"}.fa-folder-blank:before,.fa-folder:before{content:"\f07b"}.fa-folder-closed:before{content:"\e185"}.fa-folder-minus:before{content:"\f65d"}.fa-folder-open:before{content:"\f07c"}.fa-folder-plus:before{content:"\f65e"}.fa-folder-tree:before{content:"\f802"}.fa-font:before{content:"\f031"}.fa-football-ball:before,.fa-football:before{content:"\f44e"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before,.fa-forward-fast:before{content:"\f050"}.fa-forward-step:before,.fa-step-forward:before{content:"\f051"}.fa-franc-sign:before{content:"\e18f"}.fa-frog:before{content:"\f52e"}.fa-futbol-ball:before,.fa-futbol:before,.fa-soccer-ball:before{content:"\f1e3"}.fa-g:before{content:"\47"}.fa-gamepad:before{content:"\f11b"}.fa-gas-pump:before{content:"\f52f"}.fa-dashboard:before,.fa-gauge-med:before,.fa-gauge:before,.fa-tachometer-alt-average:before{content:"\f624"}.fa-gauge-high:before,.fa-tachometer-alt-fast:before,.fa-tachometer-alt:before{content:"\f625"}.fa-gauge-simple-med:before,.fa-gauge-simple:before,.fa-tachometer-average:before{content:"\f629"}.fa-gauge-simple-high:before,.fa-tachometer-fast:before,.fa-tachometer:before{content:"\f62a"}.fa-gavel:before,.fa-legal:before{content:"\f0e3"}.fa-cog:before,.fa-gear:before{content:"\f013"}.fa-cogs:before,.fa-gears:before{content:"\f085"}.fa-gem:before{content:"\f3a5"}.fa-genderless:before{content:"\f22d"}.fa-ghost:before{content:"\f6e2"}.fa-gift:before{content:"\f06b"}.fa-gifts:before{content:"\f79c"}.fa-glass-water:before{content:"\e4f4"}.fa-glass-water-droplet:before{content:"\e4f5"}.fa-glasses:before{content:"\f530"}.fa-globe:before{content:"\f0ac"}.fa-golf-ball-tee:before,.fa-golf-ball:before{content:"\f450"}.fa-gopuram:before{content:"\f664"}.fa-graduation-cap:before,.fa-mortar-board:before{content:"\f19d"}.fa-greater-than:before{content:"\3e"}.fa-greater-than-equal:before{content:"\f532"}.fa-grip-horizontal:before,.fa-grip:before{content:"\f58d"}.fa-grip-lines:before{content:"\f7a4"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-grip-vertical:before{content:"\f58e"}.fa-group-arrows-rotate:before{content:"\e4f6"}.fa-guarani-sign:before{content:"\e19a"}.fa-guitar:before{content:"\f7a6"}.fa-gun:before{content:"\e19b"}.fa-h:before{content:"\48"}.fa-hammer:before{content:"\f6e3"}.fa-hamsa:before{content:"\f665"}.fa-hand-paper:before,.fa-hand:before{content:"\f256"}.fa-hand-back-fist:before,.fa-hand-rock:before{content:"\f255"}.fa-allergies:before,.fa-hand-dots:before{content:"\f461"}.fa-fist-raised:before,.fa-hand-fist:before{content:"\f6de"}.fa-hand-holding:before{content:"\f4bd"}.fa-hand-holding-dollar:before,.fa
-hand-holding-usd:before{content:"\f4c0"}.fa-hand-holding-droplet:before,.fa-hand-holding-water:before{content:"\f4c1"}.fa-hand-holding-hand:before{content:"\e4f7"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-hand-holding-medical:before{content:"\e05c"}.fa-hand-lizard:before{content:"\f258"}.fa-hand-middle-finger:before{content:"\f806"}.fa-hand-peace:before{content:"\f25b"}.fa-hand-point-down:before{content:"\f0a7"}.fa-hand-point-left:before{content:"\f0a5"}.fa-hand-point-right:before{content:"\f0a4"}.fa-hand-point-up:before{content:"\f0a6"}.fa-hand-pointer:before{content:"\f25a"}.fa-hand-scissors:before{content:"\f257"}.fa-hand-sparkles:before{content:"\e05d"}.fa-hand-spock:before{content:"\f259"}.fa-handcuffs:before{content:"\e4f8"}.fa-hands:before,.fa-sign-language:before,.fa-signing:before{content:"\f2a7"}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before,.fa-hands-american-sign-language-interpreting:before,.fa-hands-asl-interpreting:before{content:"\f2a3"}.fa-hands-bound:before{content:"\e4f9"}.fa-hands-bubbles:before,.fa-hands-wash:before{content:"\e05e"}.fa-hands-clapping:before{content:"\e1a8"}.fa-hands-holding:before{content:"\f4c2"}.fa-hands-holding-child:before{content:"\e4fa"}.fa-hands-holding-circle:before{content:"\e4fb"}.fa-hands-praying:before,.fa-praying-hands:before{content:"\f684"}.fa-handshake:before{content:"\f2b5"}.fa-hands-helping:before,.fa-handshake-angle:before{content:"\f4c4"}.fa-handshake-alt:before,.fa-handshake-simple:before{content:"\f4c6"}.fa-handshake-alt-slash:before,.fa-handshake-simple-slash:before{content:"\e05f"}.fa-handshake-slash:before{content:"\e060"}.fa-hanukiah:before{content:"\f6e6"}.fa-hard-drive:before,.fa-hdd:before{content:"\f0a0"}.fa-hashtag:before{content:"\23"}.fa-hat-cowboy:before{content:"\f8c0"}.fa-hat-cowboy-side:before{content:"\f8c1"}.fa-hat-wizard:before{content:"\f6e8"}.fa-head-side-cough:before{content:"\e061"}.fa-head-side-cough-slash:before{content:"\e062"}.fa-head-side-mask:before{content:"\e063"}.fa-head-side-virus:before{content:"\e064"}.fa-header:before,.fa-heading:before{content:"\f1dc"}.fa-headphones:before{content:"\f025"}.fa-headphones-alt:before,.fa-headphones-simple:before{content:"\f58f"}.fa-headset:before{content:"\f590"}.fa-heart:before{content:"\f004"}.fa-heart-circle-bolt:before{content:"\e4fc"}.fa-heart-circle-check:before{content:"\e4fd"}.fa-heart-circle-exclamation:before{content:"\e4fe"}.fa-heart-circle-minus:before{content:"\e4ff"}.fa-heart-circle-plus:before{content:"\e500"}.fa-heart-circle-xmark:before{content:"\e501"}.fa-heart-broken:before,.fa-heart-crack:before{content:"\f7a9"}.fa-heart-pulse:before,.fa-heartbeat:before{content:"\f21e"}.fa-helicopter:before{content:"\f533"}.fa-helicopter-symbol:before{content:"\e502"}.fa-hard-hat:before,.fa-hat-hard:before,.fa-helmet-safety:before{content:"\f807"}.fa-helmet-un:before{content:"\e503"}.fa-highlighter:before{content:"\f591"}.fa-hill-avalanche:before{content:"\e507"}.fa-hill-rockslide:before{content:"\e508"}.fa-hippo:before{content:"\f6ed"}.fa-hockey-puck:before{content:"\f453"}.fa-holly-berry:before{content:"\f7aa"}.fa-horse:before{content:"\f6f0"}.fa-horse-head:before{content:"\f7ab"}.fa-hospital-alt:before,.fa-hospital-wide:before,.fa-hospital:before{content:"\f0f8"}.fa-hospital-user:before{content:"\f80d"}.fa-hot-tub-person:before,.fa-hot-tub:before{content:"\f593"}.fa-hotdog:before{content:"\f80f"}.fa-hotel:before{content:"\f594"}.fa-hourglass-empty:before,.fa-hourglass:before{content:"\f254"}.fa-hourglass-3:before,.fa-hourg
lass-end:before{content:"\f253"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-home-alt:before,.fa-home-lg-alt:before,.fa-home:before,.fa-house:before{content:"\f015"}.fa-home-lg:before,.fa-house-chimney:before{content:"\e3af"}.fa-house-chimney-crack:before,.fa-house-damage:before{content:"\f6f1"}.fa-clinic-medical:before,.fa-house-chimney-medical:before{content:"\f7f2"}.fa-house-chimney-user:before{content:"\e065"}.fa-house-chimney-window:before{content:"\e00d"}.fa-house-circle-check:before{content:"\e509"}.fa-house-circle-exclamation:before{content:"\e50a"}.fa-house-circle-xmark:before{content:"\e50b"}.fa-house-crack:before{content:"\e3b1"}.fa-house-fire:before{content:"\e50c"}.fa-house-flag:before{content:"\e50d"}.fa-house-flood-water:before{content:"\e50e"}.fa-house-flood-water-circle-arrow-right:before{content:"\e50f"}.fa-house-laptop:before,.fa-laptop-house:before{content:"\e066"}.fa-house-lock:before{content:"\e510"}.fa-house-medical:before{content:"\e3b2"}.fa-house-medical-circle-check:before{content:"\e511"}.fa-house-medical-circle-exclamation:before{content:"\e512"}.fa-house-medical-circle-xmark:before{content:"\e513"}.fa-house-medical-flag:before{content:"\e514"}.fa-house-signal:before{content:"\e012"}.fa-house-tsunami:before{content:"\e515"}.fa-home-user:before,.fa-house-user:before{content:"\e1b0"}.fa-hryvnia-sign:before,.fa-hryvnia:before{content:"\f6f2"}.fa-hurricane:before{content:"\f751"}.fa-i:before{content:"\49"}.fa-i-cursor:before{content:"\f246"}.fa-ice-cream:before{content:"\f810"}.fa-icicles:before{content:"\f7ad"}.fa-heart-music-camera-bolt:before,.fa-icons:before{content:"\f86d"}.fa-id-badge:before{content:"\f2c1"}.fa-drivers-license:before,.fa-id-card:before{content:"\f2c2"}.fa-id-card-alt:before,.fa-id-card-clip:before{content:"\f47f"}.fa-igloo:before{content:"\f7ae"}.fa-image:before{content:"\f03e"}.fa-image-portrait:before,.fa-portrait:before{content:"\f3e0"}.fa-images:before{content:"\f302"}.fa-inbox:before{content:"\f01c"}.fa-indent:before{content:"\f03c"}.fa-indian-rupee-sign:before,.fa-indian-rupee:before,.fa-inr:before{content:"\e1bc"}.fa-industry:before{content:"\f275"}.fa-infinity:before{content:"\f534"}.fa-info:before{content:"\f129"}.fa-italic:before{content:"\f033"}.fa-j:before{content:"\4a"}.fa-jar:before{content:"\e516"}.fa-jar-wheat:before{content:"\e517"}.fa-jedi:before{content:"\f669"}.fa-fighter-jet:before,.fa-jet-fighter:before{content:"\f0fb"}.fa-jet-fighter-up:before{content:"\e518"}.fa-joint:before{content:"\f595"}.fa-jug-detergent:before{content:"\e519"}.fa-k:before{content:"\4b"}.fa-kaaba:before{content:"\f66b"}.fa-key:before{content:"\f084"}.fa-keyboard:before{content:"\f11c"}.fa-khanda:before{content:"\f66d"}.fa-kip-sign:before{content:"\e1c4"}.fa-first-aid:before,.fa-kit-medical:before{content:"\f479"}.fa-kitchen-set:before{content:"\e51a"}.fa-kiwi-bird:before{content:"\f535"}.fa-l:before{content:"\4c"}.fa-land-mine-on:before{content:"\e51b"}.fa-landmark:before{content:"\f66f"}.fa-landmark-alt:before,.fa-landmark-dome:before{content:"\f752"}.fa-landmark-flag:before{content:"\e51c"}.fa-language:before{content:"\f1ab"}.fa-laptop:before{content:"\f109"}.fa-laptop-code:before{content:"\f5fc"}.fa-laptop-file:before{content:"\e51d"}.fa-laptop-medical:before{content:"\f812"}.fa-lari-sign:before{content:"\e1c8"}.fa-layer-group:before{content:"\f5fd"}.fa-leaf:before{content:"\f06c"}.fa-left-long:before,.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-arrows-
alt-h:before,.fa-left-right:before{content:"\f337"}.fa-lemon:before{content:"\f094"}.fa-less-than:before{content:"\3c"}.fa-less-than-equal:before{content:"\f537"}.fa-life-ring:before{content:"\f1cd"}.fa-lightbulb:before{content:"\f0eb"}.fa-lines-leaning:before{content:"\e51e"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-chain-broken:before,.fa-chain-slash:before,.fa-link-slash:before,.fa-unlink:before{content:"\f127"}.fa-lira-sign:before{content:"\f195"}.fa-list-squares:before,.fa-list:before{content:"\f03a"}.fa-list-check:before,.fa-tasks:before{content:"\f0ae"}.fa-list-1-2:before,.fa-list-numeric:before,.fa-list-ol:before{content:"\f0cb"}.fa-list-dots:before,.fa-list-ul:before{content:"\f0ca"}.fa-litecoin-sign:before{content:"\e1d3"}.fa-location-arrow:before{content:"\f124"}.fa-location-crosshairs:before,.fa-location:before{content:"\f601"}.fa-location-dot:before,.fa-map-marker-alt:before{content:"\f3c5"}.fa-location-pin:before,.fa-map-marker:before{content:"\f041"}.fa-location-pin-lock:before{content:"\e51f"}.fa-lock:before{content:"\f023"}.fa-lock-open:before{content:"\f3c1"}.fa-locust:before{content:"\e520"}.fa-lungs:before{content:"\f604"}.fa-lungs-virus:before{content:"\e067"}.fa-m:before{content:"\4d"}.fa-magnet:before{content:"\f076"}.fa-magnifying-glass:before,.fa-search:before{content:"\f002"}.fa-magnifying-glass-arrow-right:before{content:"\e521"}.fa-magnifying-glass-chart:before{content:"\e522"}.fa-magnifying-glass-dollar:before,.fa-search-dollar:before{content:"\f688"}.fa-magnifying-glass-location:before,.fa-search-location:before{content:"\f689"}.fa-magnifying-glass-minus:before,.fa-search-minus:before{content:"\f010"}.fa-magnifying-glass-plus:before,.fa-search-plus:before{content:"\f00e"}.fa-manat-sign:before{content:"\e1d5"}.fa-map:before{content:"\f279"}.fa-map-location:before,.fa-map-marked:before{content:"\f59f"}.fa-map-location-dot:before,.fa-map-marked-alt:before{content:"\f5a0"}.fa-map-pin:before{content:"\f276"}.fa-marker:before{content:"\f5a1"}.fa-mars:before{content:"\f222"}.fa-mars-and-venus:before{content:"\f224"}.fa-mars-and-venus-burst:before{content:"\e523"}.fa-mars-double:before{content:"\f227"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-h:before,.fa-mars-stroke-right:before{content:"\f22b"}.fa-mars-stroke-up:before,.fa-mars-stroke-v:before{content:"\f22a"}.fa-glass-martini-alt:before,.fa-martini-glass:before{content:"\f57b"}.fa-cocktail:before,.fa-martini-glass-citrus:before{content:"\f561"}.fa-glass-martini:before,.fa-martini-glass-empty:before{content:"\f000"}.fa-mask:before{content:"\f6fa"}.fa-mask-face:before{content:"\e1d7"}.fa-mask-ventilator:before{content:"\e524"}.fa-masks-theater:before,.fa-theater-masks:before{content:"\f630"}.fa-mattress-pillow:before{content:"\e525"}.fa-expand-arrows-alt:before,.fa-maximize:before{content:"\f31e"}.fa-medal:before{content:"\f5a2"}.fa-memory:before{content:"\f538"}.fa-menorah:before{content:"\f676"}.fa-mercury:before{content:"\f223"}.fa-comment-alt:before,.fa-message:before{content:"\f27a"}.fa-meteor:before{content:"\f753"}.fa-microchip:before{content:"\f2db"}.fa-microphone:before{content:"\f130"}.fa-microphone-alt:before,.fa-microphone-lines:before{content:"\f3c9"}.fa-microphone-alt-slash:before,.fa-microphone-lines-slash:before{content:"\f539"}.fa-microphone-slash:before{content:"\f131"}.fa-microscope:before{content:"\f610"}.fa-mill-sign:before{content:"\e1ed"}.fa-compress-arrows-alt:before,.fa-minimize:before{content:"\f78c"}.fa-minus:before,.fa-subtract:before{content:"\f068"}.fa-mitten:befor
e{content:"\f7b5"}.fa-mobile-android:before,.fa-mobile-phone:before,.fa-mobile:before{content:"\f3ce"}.fa-mobile-button:before{content:"\f10b"}.fa-mobile-retro:before{content:"\e527"}.fa-mobile-android-alt:before,.fa-mobile-screen:before{content:"\f3cf"}.fa-mobile-alt:before,.fa-mobile-screen-button:before{content:"\f3cd"}.fa-money-bill:before{content:"\f0d6"}.fa-money-bill-1:before,.fa-money-bill-alt:before{content:"\f3d1"}.fa-money-bill-1-wave:before,.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-money-bill-transfer:before{content:"\e528"}.fa-money-bill-trend-up:before{content:"\e529"}.fa-money-bill-wave:before{content:"\f53a"}.fa-money-bill-wheat:before{content:"\e52a"}.fa-money-bills:before{content:"\e1f3"}.fa-money-check:before{content:"\f53c"}.fa-money-check-alt:before,.fa-money-check-dollar:before{content:"\f53d"}.fa-monument:before{content:"\f5a6"}.fa-moon:before{content:"\f186"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-mosque:before{content:"\f678"}.fa-mosquito:before{content:"\e52b"}.fa-mosquito-net:before{content:"\e52c"}.fa-motorcycle:before{content:"\f21c"}.fa-mound:before{content:"\e52d"}.fa-mountain:before{content:"\f6fc"}.fa-mountain-city:before{content:"\e52e"}.fa-mountain-sun:before{content:"\e52f"}.fa-mug-hot:before{content:"\f7b6"}.fa-coffee:before,.fa-mug-saucer:before{content:"\f0f4"}.fa-music:before{content:"\f001"}.fa-n:before{content:"\4e"}.fa-naira-sign:before{content:"\e1f6"}.fa-network-wired:before{content:"\f6ff"}.fa-neuter:before{content:"\f22c"}.fa-newspaper:before{content:"\f1ea"}.fa-not-equal:before{content:"\f53e"}.fa-notdef:before{content:"\e1fe"}.fa-note-sticky:before,.fa-sticky-note:before{content:"\f249"}.fa-notes-medical:before{content:"\f481"}.fa-o:before{content:"\4f"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-oil-can:before{content:"\f613"}.fa-oil-well:before{content:"\e532"}.fa-om:before{content:"\f679"}.fa-otter:before{content:"\f700"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-p:before{content:"\50"}.fa-pager:before{content:"\f815"}.fa-paint-roller:before{content:"\f5aa"}.fa-paint-brush:before,.fa-paintbrush:before{content:"\f1fc"}.fa-palette:before{content:"\f53f"}.fa-pallet:before{content:"\f482"}.fa-panorama:before{content:"\e209"}.fa-paper-plane:before{content:"\f1d8"}.fa-paperclip:before{content:"\f0c6"}.fa-parachute-box:before{content:"\f4cd"}.fa-paragraph:before{content:"\f1dd"}.fa-passport:before{content:"\f5ab"}.fa-file-clipboard:before,.fa-paste:before{content:"\f0ea"}.fa-pause:before{content:"\f04c"}.fa-paw:before{content:"\f1b0"}.fa-peace:before{content:"\f67c"}.fa-pen:before{content:"\f304"}.fa-pen-alt:before,.fa-pen-clip:before{content:"\f305"}.fa-pen-fancy:before{content:"\f5ac"}.fa-pen-nib:before{content:"\f5ad"}.fa-pen-ruler:before,.fa-pencil-ruler:before{content:"\f5ae"}.fa-edit:before,.fa-pen-to-square:before{content:"\f044"}.fa-pencil-alt:before,.fa-pencil:before{content:"\f303"}.fa-people-arrows-left-right:before,.fa-people-arrows:before{content:"\e068"}.fa-people-carry-box:before,.fa-people-carry:before{content:"\f4ce"}.fa-people-group:before{content:"\e533"}.fa-people-line:before{content:"\e534"}.fa-people-pulling:before{content:"\e535"}.fa-people-robbery:before{content:"\e536"}.fa-people-roof:before{content:"\e537"}.fa-pepper-hot:before{content:"\f816"}.fa-percent:before,.fa-percentage:before{content:"\25"}.fa-male:before,.fa-person:before{content:"\f183"}.fa-person-arrow-down-to-line:before{content:"\e538"}.fa-person-arrow-up-from-line:before{content:"\e539"}.
fa-biking:before,.fa-person-biking:before{content:"\f84a"}.fa-person-booth:before{content:"\f756"}.fa-person-breastfeeding:before{content:"\e53a"}.fa-person-burst:before{content:"\e53b"}.fa-person-cane:before{content:"\e53c"}.fa-person-chalkboard:before{content:"\e53d"}.fa-person-circle-check:before{content:"\e53e"}.fa-person-circle-exclamation:before{content:"\e53f"}.fa-person-circle-minus:before{content:"\e540"}.fa-person-circle-plus:before{content:"\e541"}.fa-person-circle-question:before{content:"\e542"}.fa-person-circle-xmark:before{content:"\e543"}.fa-digging:before,.fa-person-digging:before{content:"\f85e"}.fa-diagnoses:before,.fa-person-dots-from-line:before{content:"\f470"}.fa-female:before,.fa-person-dress:before{content:"\f182"}.fa-person-dress-burst:before{content:"\e544"}.fa-person-drowning:before{content:"\e545"}.fa-person-falling:before{content:"\e546"}.fa-person-falling-burst:before{content:"\e547"}.fa-person-half-dress:before{content:"\e548"}.fa-person-harassing:before{content:"\e549"}.fa-hiking:before,.fa-person-hiking:before{content:"\f6ec"}.fa-person-military-pointing:before{content:"\e54a"}.fa-person-military-rifle:before{content:"\e54b"}.fa-person-military-to-person:before{content:"\e54c"}.fa-person-praying:before,.fa-pray:before{content:"\f683"}.fa-person-pregnant:before{content:"\e31e"}.fa-person-rays:before{content:"\e54d"}.fa-person-rifle:before{content:"\e54e"}.fa-person-running:before,.fa-running:before{content:"\f70c"}.fa-person-shelter:before{content:"\e54f"}.fa-person-skating:before,.fa-skating:before{content:"\f7c5"}.fa-person-skiing:before,.fa-skiing:before{content:"\f7c9"}.fa-person-skiing-nordic:before,.fa-skiing-nordic:before{content:"\f7ca"}.fa-person-snowboarding:before,.fa-snowboarding:before{content:"\f7ce"}.fa-person-swimming:before,.fa-swimmer:before{content:"\f5c4"}.fa-person-through-window:before{content:"\e5a9"}.fa-person-walking:before,.fa-walking:before{content:"\f554"}.fa-person-walking-arrow-loop-left:before{content:"\e551"}.fa-person-walking-arrow-right:before{content:"\e552"}.fa-person-walking-dashed-line-arrow-right:before{content:"\e553"}.fa-person-walking-luggage:before{content:"\e554"}.fa-blind:before,.fa-person-walking-with-cane:before{content:"\f29d"}.fa-peseta-sign:before{content:"\e221"}.fa-peso-sign:before{content:"\e222"}.fa-phone:before{content:"\f095"}.fa-phone-alt:before,.fa-phone-flip:before{content:"\f879"}.fa-phone-slash:before{content:"\f3dd"}.fa-phone-volume:before,.fa-volume-control-phone:before{content:"\f2a0"}.fa-photo-film:before,.fa-photo-video:before{content:"\f87c"}.fa-piggy-bank:before{content:"\f4d3"}.fa-pills:before{content:"\f484"}.fa-pizza-slice:before{content:"\f818"}.fa-place-of-worship:before{content:"\f67f"}.fa-plane:before{content:"\f072"}.fa-plane-arrival:before{content:"\f5af"}.fa-plane-circle-check:before{content:"\e555"}.fa-plane-circle-exclamation:before{content:"\e556"}.fa-plane-circle-xmark:before{content:"\e557"}.fa-plane-departure:before{content:"\f5b0"}.fa-plane-lock:before{content:"\e558"}.fa-plane-slash:before{content:"\e069"}.fa-plane-up:before{content:"\e22d"}.fa-plant-wilt:before{content:"\e5aa"}.fa-plate-wheat:before{content:"\e55a"}.fa-play:before{content:"\f04b"}.fa-plug:before{content:"\f1e6"}.fa-plug-circle-bolt:before{content:"\e55b"}.fa-plug-circle-check:before{content:"\e55c"}.fa-plug-circle-exclamation:before{content:"\e55d"}.fa-plug-circle-minus:before{content:"\e55e"}.fa-plug-circle-plus:before{content:"\e55f"}.fa-plug-circle-xmark:before{content:"\e560"}.fa-add:before,.fa-plus:be
fore{content:"\2b"}.fa-plus-minus:before{content:"\e43c"}.fa-podcast:before{content:"\f2ce"}.fa-poo:before{content:"\f2fe"}.fa-poo-bolt:before,.fa-poo-storm:before{content:"\f75a"}.fa-poop:before{content:"\f619"}.fa-power-off:before{content:"\f011"}.fa-prescription:before{content:"\f5b1"}.fa-prescription-bottle:before{content:"\f485"}.fa-prescription-bottle-alt:before,.fa-prescription-bottle-medical:before{content:"\f486"}.fa-print:before{content:"\f02f"}.fa-pump-medical:before{content:"\e06a"}.fa-pump-soap:before{content:"\e06b"}.fa-puzzle-piece:before{content:"\f12e"}.fa-q:before{content:"\51"}.fa-qrcode:before{content:"\f029"}.fa-question:before{content:"\3f"}.fa-quote-left-alt:before,.fa-quote-left:before{content:"\f10d"}.fa-quote-right-alt:before,.fa-quote-right:before{content:"\f10e"}.fa-r:before{content:"\52"}.fa-radiation:before{content:"\f7b9"}.fa-radio:before{content:"\f8d7"}.fa-rainbow:before{content:"\f75b"}.fa-ranking-star:before{content:"\e561"}.fa-receipt:before{content:"\f543"}.fa-record-vinyl:before{content:"\f8d9"}.fa-ad:before,.fa-rectangle-ad:before{content:"\f641"}.fa-list-alt:before,.fa-rectangle-list:before{content:"\f022"}.fa-rectangle-times:before,.fa-rectangle-xmark:before,.fa-times-rectangle:before,.fa-window-close:before{content:"\f410"}.fa-recycle:before{content:"\f1b8"}.fa-registered:before{content:"\f25d"}.fa-repeat:before{content:"\f363"}.fa-mail-reply:before,.fa-reply:before{content:"\f3e5"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-republican:before{content:"\f75e"}.fa-restroom:before{content:"\f7bd"}.fa-retweet:before{content:"\f079"}.fa-ribbon:before{content:"\f4d6"}.fa-right-from-bracket:before,.fa-sign-out-alt:before{content:"\f2f5"}.fa-exchange-alt:before,.fa-right-left:before{content:"\f362"}.fa-long-arrow-alt-right:before,.fa-right-long:before{content:"\f30b"}.fa-right-to-bracket:before,.fa-sign-in-alt:before{content:"\f2f6"}.fa-ring:before{content:"\f70b"}.fa-road:before{content:"\f018"}.fa-road-barrier:before{content:"\e562"}.fa-road-bridge:before{content:"\e563"}.fa-road-circle-check:before{content:"\e564"}.fa-road-circle-exclamation:before{content:"\e565"}.fa-road-circle-xmark:before{content:"\e566"}.fa-road-lock:before{content:"\e567"}.fa-road-spikes:before{content:"\e568"}.fa-robot:before{content:"\f544"}.fa-rocket:before{content:"\f135"}.fa-rotate:before,.fa-sync-alt:before{content:"\f2f1"}.fa-rotate-back:before,.fa-rotate-backward:before,.fa-rotate-left:before,.fa-undo-alt:before{content:"\f2ea"}.fa-redo-alt:before,.fa-rotate-forward:before,.fa-rotate-right:before{content:"\f2f9"}.fa-route:before{content:"\f4d7"}.fa-feed:before,.fa-rss:before{content:"\f09e"}.fa-rouble:before,.fa-rub:before,.fa-ruble-sign:before,.fa-ruble:before{content:"\f158"}.fa-rug:before{content:"\e569"}.fa-ruler:before{content:"\f545"}.fa-ruler-combined:before{content:"\f546"}.fa-ruler-horizontal:before{content:"\f547"}.fa-ruler-vertical:before{content:"\f548"}.fa-rupee-sign:before,.fa-rupee:before{content:"\f156"}.fa-rupiah-sign:before{content:"\e23d"}.fa-s:before{content:"\53"}.fa-sack-dollar:before{content:"\f81d"}.fa-sack-xmark:before{content:"\e56a"}.fa-sailboat:before{content:"\e445"}.fa-satellite:before{content:"\f7bf"}.fa-satellite-dish:before{content:"\f7c0"}.fa-balance-scale:before,.fa-scale-balanced:before{content:"\f24e"}.fa-balance-scale-left:before,.fa-scale-unbalanced:before{content:"\f515"}.fa-balance-scale-right:before,.fa-scale-unbalanced-flip:before{content:"\f516"}.fa-school:before{content:"\f549"}.fa-school-circle-check:befor
e{content:"\e56b"}.fa-school-circle-exclamation:before{content:"\e56c"}.fa-school-circle-xmark:before{content:"\e56d"}.fa-school-flag:before{content:"\e56e"}.fa-school-lock:before{content:"\e56f"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-screwdriver:before{content:"\f54a"}.fa-screwdriver-wrench:before,.fa-tools:before{content:"\f7d9"}.fa-scroll:before{content:"\f70e"}.fa-scroll-torah:before,.fa-torah:before{content:"\f6a0"}.fa-sd-card:before{content:"\f7c2"}.fa-section:before{content:"\e447"}.fa-seedling:before,.fa-sprout:before{content:"\f4d8"}.fa-server:before{content:"\f233"}.fa-shapes:before,.fa-triangle-circle-square:before{content:"\f61f"}.fa-arrow-turn-right:before,.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-share-from-square:before,.fa-share-square:before{content:"\f14d"}.fa-share-alt:before,.fa-share-nodes:before{content:"\f1e0"}.fa-sheet-plastic:before{content:"\e571"}.fa-ils:before,.fa-shekel-sign:before,.fa-shekel:before,.fa-sheqel-sign:before,.fa-sheqel:before{content:"\f20b"}.fa-shield-blank:before,.fa-shield:before{content:"\f132"}.fa-shield-cat:before{content:"\e572"}.fa-shield-dog:before{content:"\e573"}.fa-shield-alt:before,.fa-shield-halved:before{content:"\f3ed"}.fa-shield-heart:before{content:"\e574"}.fa-shield-virus:before{content:"\e06c"}.fa-ship:before{content:"\f21a"}.fa-shirt:before,.fa-t-shirt:before,.fa-tshirt:before{content:"\f553"}.fa-shoe-prints:before{content:"\f54b"}.fa-shop:before,.fa-store-alt:before{content:"\f54f"}.fa-shop-lock:before{content:"\e4a5"}.fa-shop-slash:before,.fa-store-alt-slash:before{content:"\e070"}.fa-shower:before{content:"\f2cc"}.fa-shrimp:before{content:"\e448"}.fa-random:before,.fa-shuffle:before{content:"\f074"}.fa-shuttle-space:before,.fa-space-shuttle:before{content:"\f197"}.fa-sign-hanging:before,.fa-sign:before{content:"\f4d9"}.fa-signal-5:before,.fa-signal-perfect:before,.fa-signal:before{content:"\f012"}.fa-signature:before{content:"\f5b7"}.fa-map-signs:before,.fa-signs-post:before{content:"\f277"}.fa-sim-card:before{content:"\f7c4"}.fa-sink:before{content:"\e06d"}.fa-sitemap:before{content:"\f0e8"}.fa-skull:before{content:"\f54c"}.fa-skull-crossbones:before{content:"\f714"}.fa-slash:before{content:"\f715"}.fa-sleigh:before{content:"\f7cc"}.fa-sliders-h:before,.fa-sliders:before{content:"\f1de"}.fa-smog:before{content:"\f75f"}.fa-smoking:before{content:"\f48d"}.fa-snowflake:before{content:"\f2dc"}.fa-snowman:before{content:"\f7d0"}.fa-snowplow:before{content:"\f7d2"}.fa-soap:before{content:"\e06e"}.fa-socks:before{content:"\f696"}.fa-solar-panel:before{content:"\f5ba"}.fa-sort:before,.fa-unsorted:before{content:"\f0dc"}.fa-sort-desc:before,.fa-sort-down:before{content:"\f0dd"}.fa-sort-asc:before,.fa-sort-up:before{content:"\f0de"}.fa-spa:before{content:"\f5bb"}.fa-pastafarianism:before,.fa-spaghetti-monster-flying:before{content:"\f67b"}.fa-spell-check:before{content:"\f891"}.fa-spider:before{content:"\f717"}.fa-spinner:before{content:"\f110"}.fa-splotch:before{content:"\f5bc"}.fa-spoon:before,.fa-utensil-spoon:before{content:"\f2e5"}.fa-spray-can:before{content:"\f5bd"}.fa-air-freshener:before,.fa-spray-can-sparkles:before{content:"\f5d0"}.fa-square:before{content:"\f0c8"}.fa-external-link-square:before,.fa-square-arrow-up-right:before{content:"\f14c"}.fa-caret-square-down:before,.fa-square-caret-down:before{content:"\f150"}.fa-caret-square-left:before,.fa-square-caret-left:before{content:"\f191"}.fa-caret-square-right:before,.fa-square-caret-right:before{content:"\f152"}.fa-caret-square-up:befor
e,.fa-square-caret-up:before{content:"\f151"}.fa-check-square:before,.fa-square-check:before{content:"\f14a"}.fa-envelope-square:before,.fa-square-envelope:before{content:"\f199"}.fa-square-full:before{content:"\f45c"}.fa-h-square:before,.fa-square-h:before{content:"\f0fd"}.fa-minus-square:before,.fa-square-minus:before{content:"\f146"}.fa-square-nfi:before{content:"\e576"}.fa-parking:before,.fa-square-parking:before{content:"\f540"}.fa-pen-square:before,.fa-pencil-square:before,.fa-square-pen:before{content:"\f14b"}.fa-square-person-confined:before{content:"\e577"}.fa-phone-square:before,.fa-square-phone:before{content:"\f098"}.fa-phone-square-alt:before,.fa-square-phone-flip:before{content:"\f87b"}.fa-plus-square:before,.fa-square-plus:before{content:"\f0fe"}.fa-poll-h:before,.fa-square-poll-horizontal:before{content:"\f682"}.fa-poll:before,.fa-square-poll-vertical:before{content:"\f681"}.fa-square-root-alt:before,.fa-square-root-variable:before{content:"\f698"}.fa-rss-square:before,.fa-square-rss:before{content:"\f143"}.fa-share-alt-square:before,.fa-square-share-nodes:before{content:"\f1e1"}.fa-external-link-square-alt:before,.fa-square-up-right:before{content:"\f360"}.fa-square-virus:before{content:"\e578"}.fa-square-xmark:before,.fa-times-square:before,.fa-xmark-square:before{content:"\f2d3"}.fa-rod-asclepius:before,.fa-rod-snake:before,.fa-staff-aesculapius:before,.fa-staff-snake:before{content:"\e579"}.fa-stairs:before{content:"\e289"}.fa-stamp:before{content:"\f5bf"}.fa-stapler:before{content:"\e5af"}.fa-star:before{content:"\f005"}.fa-star-and-crescent:before{content:"\f699"}.fa-star-half:before{content:"\f089"}.fa-star-half-alt:before,.fa-star-half-stroke:before{content:"\f5c0"}.fa-star-of-david:before{content:"\f69a"}.fa-star-of-life:before{content:"\f621"}.fa-gbp:before,.fa-pound-sign:before,.fa-sterling-sign:before{content:"\f154"}.fa-stethoscope:before{content:"\f0f1"}.fa-stop:before{content:"\f04d"}.fa-stopwatch:before{content:"\f2f2"}.fa-stopwatch-20:before{content:"\e06f"}.fa-store:before{content:"\f54e"}.fa-store-slash:before{content:"\e071"}.fa-street-view:before{content:"\f21d"}.fa-strikethrough:before{content:"\f0cc"}.fa-stroopwafel:before{content:"\f551"}.fa-subscript:before{content:"\f12c"}.fa-suitcase:before{content:"\f0f2"}.fa-medkit:before,.fa-suitcase-medical:before{content:"\f0fa"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-sun:before{content:"\f185"}.fa-sun-plant-wilt:before{content:"\e57a"}.fa-superscript:before{content:"\f12b"}.fa-swatchbook:before{content:"\f5c3"}.fa-synagogue:before{content:"\f69b"}.fa-syringe:before{content:"\f48e"}.fa-t:before{content:"\54"}.fa-table:before{content:"\f0ce"}.fa-table-cells:before,.fa-th:before{content:"\f00a"}.fa-table-cells-large:before,.fa-th-large:before{content:"\f009"}.fa-columns:before,.fa-table-columns:before{content:"\f0db"}.fa-table-list:before,.fa-th-list:before{content:"\f00b"}.fa-ping-pong-paddle-ball:before,.fa-table-tennis-paddle-ball:before,.fa-table-tennis:before{content:"\f45d"}.fa-tablet-android:before,.fa-tablet:before{content:"\f3fb"}.fa-tablet-button:before{content:"\f10a"}.fa-tablet-alt:before,.fa-tablet-screen-button:before{content:"\f3fa"}.fa-tablets:before{content:"\f490"}.fa-digital-tachograph:before,.fa-tachograph-digital:before{content:"\f566"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-tape:before{content:"\f4db"}.fa-tarp:before{content:"\e57b"}.fa-tarp-droplet:before{content:"\e57c"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-teeth:before{content:"\f62e"}.fa-t
eeth-open:before{content:"\f62f"}.fa-temperature-arrow-down:before,.fa-temperature-down:before{content:"\e03f"}.fa-temperature-arrow-up:before,.fa-temperature-up:before{content:"\e040"}.fa-temperature-0:before,.fa-temperature-empty:before,.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\f2cb"}.fa-temperature-4:before,.fa-temperature-full:before,.fa-thermometer-4:before,.fa-thermometer-full:before{content:"\f2c7"}.fa-temperature-2:before,.fa-temperature-half:before,.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\f2c9"}.fa-temperature-high:before{content:"\f769"}.fa-temperature-low:before{content:"\f76b"}.fa-temperature-1:before,.fa-temperature-quarter:before,.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\f2ca"}.fa-temperature-3:before,.fa-temperature-three-quarters:before,.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-tenge-sign:before,.fa-tenge:before{content:"\f7d7"}.fa-tent:before{content:"\e57d"}.fa-tent-arrow-down-to-line:before{content:"\e57e"}.fa-tent-arrow-left-right:before{content:"\e57f"}.fa-tent-arrow-turn-left:before{content:"\e580"}.fa-tent-arrows-down:before{content:"\e581"}.fa-tents:before{content:"\e582"}.fa-terminal:before{content:"\f120"}.fa-text-height:before{content:"\f034"}.fa-remove-format:before,.fa-text-slash:before{content:"\f87d"}.fa-text-width:before{content:"\f035"}.fa-thermometer:before{content:"\f491"}.fa-thumbs-down:before{content:"\f165"}.fa-thumbs-up:before{content:"\f164"}.fa-thumb-tack:before,.fa-thumbtack:before{content:"\f08d"}.fa-ticket:before{content:"\f145"}.fa-ticket-alt:before,.fa-ticket-simple:before{content:"\f3ff"}.fa-timeline:before{content:"\e29c"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-toilet:before{content:"\f7d8"}.fa-toilet-paper:before{content:"\f71e"}.fa-toilet-paper-slash:before{content:"\e072"}.fa-toilet-portable:before{content:"\e583"}.fa-toilets-portable:before{content:"\e584"}.fa-toolbox:before{content:"\f552"}.fa-tooth:before{content:"\f5c9"}.fa-torii-gate:before{content:"\f6a1"}.fa-tornado:before{content:"\f76f"}.fa-broadcast-tower:before,.fa-tower-broadcast:before{content:"\f519"}.fa-tower-cell:before{content:"\e585"}.fa-tower-observation:before{content:"\e586"}.fa-tractor:before{content:"\f722"}.fa-trademark:before{content:"\f25c"}.fa-traffic-light:before{content:"\f637"}.fa-trailer:before{content:"\e041"}.fa-train:before{content:"\f238"}.fa-subway:before,.fa-train-subway:before{content:"\f239"}.fa-train-tram:before{content:"\e5b4"}.fa-transgender-alt:before,.fa-transgender:before{content:"\f225"}.fa-trash:before{content:"\f1f8"}.fa-trash-arrow-up:before,.fa-trash-restore:before{content:"\f829"}.fa-trash-alt:before,.fa-trash-can:before{content:"\f2ed"}.fa-trash-can-arrow-up:before,.fa-trash-restore-alt:before{content:"\f82a"}.fa-tree:before{content:"\f1bb"}.fa-tree-city:before{content:"\e587"}.fa-exclamation-triangle:before,.fa-triangle-exclamation:before,.fa-warning:before{content:"\f071"}.fa-trophy:before{content:"\f091"}.fa-trowel:before{content:"\e589"}.fa-trowel-bricks:before{content:"\e58a"}.fa-truck:before{content:"\f0d1"}.fa-truck-arrow-right:before{content:"\e58b"}.fa-truck-droplet:before{content:"\e58c"}.fa-shipping-fast:before,.fa-truck-fast:before{content:"\f48b"}.fa-truck-field:before{content:"\e58d"}.fa-truck-field-un:before{content:"\e58e"}.fa-truck-front:before{content:"\e2b7"}.fa-ambulance:before,.fa-truck-medical:before{content:"\f0f9"}.fa-truck-monster:before{content:"\f63b"}.fa-truck-moving
:before{content:"\f4df"}.fa-truck-pickup:before{content:"\f63c"}.fa-truck-plane:before{content:"\e58f"}.fa-truck-loading:before,.fa-truck-ramp-box:before{content:"\f4de"}.fa-teletype:before,.fa-tty:before{content:"\f1e4"}.fa-try:before,.fa-turkish-lira-sign:before,.fa-turkish-lira:before{content:"\e2bb"}.fa-level-down-alt:before,.fa-turn-down:before{content:"\f3be"}.fa-level-up-alt:before,.fa-turn-up:before{content:"\f3bf"}.fa-television:before,.fa-tv-alt:before,.fa-tv:before{content:"\f26c"}.fa-u:before{content:"\55"}.fa-umbrella:before{content:"\f0e9"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-underline:before{content:"\f0cd"}.fa-universal-access:before{content:"\f29a"}.fa-unlock:before{content:"\f09c"}.fa-unlock-alt:before,.fa-unlock-keyhole:before{content:"\f13e"}.fa-arrows-alt-v:before,.fa-up-down:before{content:"\f338"}.fa-arrows-alt:before,.fa-up-down-left-right:before{content:"\f0b2"}.fa-long-arrow-alt-up:before,.fa-up-long:before{content:"\f30c"}.fa-expand-alt:before,.fa-up-right-and-down-left-from-center:before{content:"\f424"}.fa-external-link-alt:before,.fa-up-right-from-square:before{content:"\f35d"}.fa-upload:before{content:"\f093"}.fa-user:before{content:"\f007"}.fa-user-astronaut:before{content:"\f4fb"}.fa-user-check:before{content:"\f4fc"}.fa-user-clock:before{content:"\f4fd"}.fa-user-doctor:before,.fa-user-md:before{content:"\f0f0"}.fa-user-cog:before,.fa-user-gear:before{content:"\f4fe"}.fa-user-graduate:before{content:"\f501"}.fa-user-friends:before,.fa-user-group:before{content:"\f500"}.fa-user-injured:before{content:"\f728"}.fa-user-alt:before,.fa-user-large:before{content:"\f406"}.fa-user-alt-slash:before,.fa-user-large-slash:before{content:"\f4fa"}.fa-user-lock:before{content:"\f502"}.fa-user-minus:before{content:"\f503"}.fa-user-ninja:before{content:"\f504"}.fa-user-nurse:before{content:"\f82f"}.fa-user-edit:before,.fa-user-pen:before{content:"\f4ff"}.fa-user-plus:before{content:"\f234"}.fa-user-secret:before{content:"\f21b"}.fa-user-shield:before{content:"\f505"}.fa-user-slash:before{content:"\f506"}.fa-user-tag:before{content:"\f507"}.fa-user-tie:before{content:"\f508"}.fa-user-times:before,.fa-user-xmark:before{content:"\f235"}.fa-users:before{content:"\f0c0"}.fa-users-between-lines:before{content:"\e591"}.fa-users-cog:before,.fa-users-gear:before{content:"\f509"}.fa-users-line:before{content:"\e592"}.fa-users-rays:before{content:"\e593"}.fa-users-rectangle:before{content:"\e594"}.fa-users-slash:before{content:"\e073"}.fa-users-viewfinder:before{content:"\e595"}.fa-cutlery:before,.fa-utensils:before{content:"\f2e7"}.fa-v:before{content:"\56"}.fa-shuttle-van:before,.fa-van-shuttle:before{content:"\f5b6"}.fa-vault:before{content:"\e2c5"}.fa-vector-square:before{content:"\f5cb"}.fa-venus:before{content:"\f221"}.fa-venus-double:before{content:"\f226"}.fa-venus-mars:before{content:"\f228"}.fa-vest:before{content:"\e085"}.fa-vest-patches:before{content:"\e086"}.fa-vial:before{content:"\f492"}.fa-vial-circle-check:before{content:"\e596"}.fa-vial-virus:before{content:"\e597"}.fa-vials:before{content:"\f493"}.fa-video-camera:before,.fa-video:before{content:"\f03d"}.fa-video-slash:before{content:"\f4e2"}.fa-vihara:before{content:"\f6a7"}.fa-virus:before{content:"\e074"}.fa-virus-covid:before{content:"\e4a8"}.fa-virus-covid-slash:before{content:"\e4a9"}.fa-virus-slash:before{content:"\e075"}.fa-viruses:before{content:"\e076"}.fa-voicemail:before{content:"\f897"}.fa-volcano:before{content:"\f770"}.fa-volleyball-ball:before,.fa-volleyball:before{content:"\f45f"}.fa-volume-
high:before,.fa-volume-up:before{content:"\f028"}.fa-volume-down:before,.fa-volume-low:before{content:"\f027"}.fa-volume-off:before{content:"\f026"}.fa-volume-mute:before,.fa-volume-times:before,.fa-volume-xmark:before{content:"\f6a9"}.fa-vr-cardboard:before{content:"\f729"}.fa-w:before{content:"\57"}.fa-walkie-talkie:before{content:"\f8ef"}.fa-wallet:before{content:"\f555"}.fa-magic:before,.fa-wand-magic:before{content:"\f0d0"}.fa-magic-wand-sparkles:before,.fa-wand-magic-sparkles:before{content:"\e2ca"}.fa-wand-sparkles:before{content:"\f72b"}.fa-warehouse:before{content:"\f494"}.fa-water:before{content:"\f773"}.fa-ladder-water:before,.fa-swimming-pool:before,.fa-water-ladder:before{content:"\f5c5"}.fa-wave-square:before{content:"\f83e"}.fa-weight-hanging:before{content:"\f5cd"}.fa-weight-scale:before,.fa-weight:before{content:"\f496"}.fa-wheat-alt:before,.fa-wheat-awn:before{content:"\e2cd"}.fa-wheat-awn-circle-exclamation:before{content:"\e598"}.fa-wheelchair:before{content:"\f193"}.fa-wheelchair-alt:before,.fa-wheelchair-move:before{content:"\e2ce"}.fa-glass-whiskey:before,.fa-whiskey-glass:before{content:"\f7a0"}.fa-wifi-3:before,.fa-wifi-strong:before,.fa-wifi:before{content:"\f1eb"}.fa-wind:before{content:"\f72e"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-wine-bottle:before{content:"\f72f"}.fa-wine-glass:before{content:"\f4e3"}.fa-wine-glass-alt:before,.fa-wine-glass-empty:before{content:"\f5ce"}.fa-krw:before,.fa-won-sign:before,.fa-won:before{content:"\f159"}.fa-worm:before{content:"\e599"}.fa-wrench:before{content:"\f0ad"}.fa-x:before{content:"\58"}.fa-x-ray:before{content:"\f497"}.fa-close:before,.fa-multiply:before,.fa-remove:before,.fa-times:before,.fa-xmark:before{content:"\f00d"}.fa-xmarks-lines:before{content:"\e59a"}.fa-y:before{content:"\59"}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen-sign:before,.fa-yen:before{content:"\f157"}.fa-yin-yang:before{content:"\f6ad"}.fa-z:before{content:"\5a"}.fa-sr-only,.fa-sr-only-focusable:not(:focus),.sr-only,.sr-only-focusable:not(:focus){position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border-width:0}:host,:root{--fa-font-brands:normal 400 1em/1 "Font Awesome 6 Brands"}@font-face{font-family:"Font Awesome 6 Brands";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}.fa-brands,.fab{font-family:"Font Awesome 6 
Brands";font-weight:400}.fa-42-group:before,.fa-innosoft:before{content:"\e080"}.fa-500px:before{content:"\f26e"}.fa-accessible-icon:before{content:"\f368"}.fa-accusoft:before{content:"\f369"}.fa-adn:before{content:"\f170"}.fa-adversal:before{content:"\f36a"}.fa-affiliatetheme:before{content:"\f36b"}.fa-airbnb:before{content:"\f834"}.fa-algolia:before{content:"\f36c"}.fa-alipay:before{content:"\f642"}.fa-amazon:before{content:"\f270"}.fa-amazon-pay:before{content:"\f42c"}.fa-amilia:before{content:"\f36d"}.fa-android:before{content:"\f17b"}.fa-angellist:before{content:"\f209"}.fa-angrycreative:before{content:"\f36e"}.fa-angular:before{content:"\f420"}.fa-app-store:before{content:"\f36f"}.fa-app-store-ios:before{content:"\f370"}.fa-apper:before{content:"\f371"}.fa-apple:before{content:"\f179"}.fa-apple-pay:before{content:"\f415"}.fa-artstation:before{content:"\f77a"}.fa-asymmetrik:before{content:"\f372"}.fa-atlassian:before{content:"\f77b"}.fa-audible:before{content:"\f373"}.fa-autoprefixer:before{content:"\f41c"}.fa-avianex:before{content:"\f374"}.fa-aviato:before{content:"\f421"}.fa-aws:before{content:"\f375"}.fa-bandcamp:before{content:"\f2d5"}.fa-battle-net:before{content:"\f835"}.fa-behance:before{content:"\f1b4"}.fa-bilibili:before{content:"\e3d9"}.fa-bimobject:before{content:"\f378"}.fa-bitbucket:before{content:"\f171"}.fa-bitcoin:before{content:"\f379"}.fa-bity:before{content:"\f37a"}.fa-black-tie:before{content:"\f27e"}.fa-blackberry:before{content:"\f37b"}.fa-blogger:before{content:"\f37c"}.fa-blogger-b:before{content:"\f37d"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-bootstrap:before{content:"\f836"}.fa-bots:before{content:"\e340"}.fa-btc:before{content:"\f15a"}.fa-buffer:before{content:"\f837"}.fa-buromobelexperte:before{content:"\f37f"}.fa-buy-n-large:before{content:"\f8a6"}.fa-buysellads:before{content:"\f20d"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-apple-pay:before{content:"\f416"}.fa-cc-diners-club:before{content:"\f24c"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-cc-visa:before{content:"\f1f0"}.fa-centercode:before{content:"\f380"}.fa-centos:before{content:"\f789"}.fa-chrome:before{content:"\f268"}.fa-chromecast:before{content:"\f838"}.fa-cloudflare:before{content:"\e07d"}.fa-cloudscale:before{content:"\f383"}.fa-cloudsmith:before{content:"\f384"}.fa-cloudversify:before{content:"\f385"}.fa-cmplid:before{content:"\e360"}.fa-codepen:before{content:"\f1cb"}.fa-codiepie:before{content:"\f284"}.fa-confluence:before{content:"\f78d"}.fa-connectdevelop:before{content:"\f20e"}.fa-contao:before{content:"\f26d"}.fa-cotton-bureau:before{content:"\f89e"}.fa-cpanel:before{content:"\f388"}.fa-creative-commons:before{content:"\f25e"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-creative-commons-share:before{content:"\f4f2"}.fa
-creative-commons-zero:before{content:"\f4f3"}.fa-critical-role:before{content:"\f6c9"}.fa-css3:before{content:"\f13c"}.fa-css3-alt:before{content:"\f38b"}.fa-cuttlefish:before{content:"\f38c"}.fa-d-and-d:before{content:"\f38d"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-dailymotion:before{content:"\e052"}.fa-dashcube:before{content:"\f210"}.fa-deezer:before{content:"\e077"}.fa-delicious:before{content:"\f1a5"}.fa-deploydog:before{content:"\f38e"}.fa-deskpro:before{content:"\f38f"}.fa-dev:before{content:"\f6cc"}.fa-deviantart:before{content:"\f1bd"}.fa-dhl:before{content:"\f790"}.fa-diaspora:before{content:"\f791"}.fa-digg:before{content:"\f1a6"}.fa-digital-ocean:before{content:"\f391"}.fa-discord:before{content:"\f392"}.fa-discourse:before{content:"\f393"}.fa-dochub:before{content:"\f394"}.fa-docker:before{content:"\f395"}.fa-draft2digital:before{content:"\f396"}.fa-dribbble:before{content:"\f17d"}.fa-dropbox:before{content:"\f16b"}.fa-drupal:before{content:"\f1a9"}.fa-dyalog:before{content:"\f399"}.fa-earlybirds:before{content:"\f39a"}.fa-ebay:before{content:"\f4f4"}.fa-edge:before{content:"\f282"}.fa-edge-legacy:before{content:"\e078"}.fa-elementor:before{content:"\f430"}.fa-ello:before{content:"\f5f1"}.fa-ember:before{content:"\f423"}.fa-empire:before{content:"\f1d1"}.fa-envira:before{content:"\f299"}.fa-erlang:before{content:"\f39d"}.fa-ethereum:before{content:"\f42e"}.fa-etsy:before{content:"\f2d7"}.fa-evernote:before{content:"\f839"}.fa-expeditedssl:before{content:"\f23e"}.fa-facebook:before{content:"\f09a"}.fa-facebook-f:before{content:"\f39e"}.fa-facebook-messenger:before{content:"\f39f"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-fedex:before{content:"\f797"}.fa-fedora:before{content:"\f798"}.fa-figma:before{content:"\f799"}.fa-firefox:before{content:"\f269"}.fa-firefox-browser:before{content:"\e007"}.fa-first-order:before{content:"\f2b0"}.fa-first-order-alt:before{content:"\f50a"}.fa-firstdraft:before{content:"\f3a1"}.fa-flickr:before{content:"\f16e"}.fa-flipboard:before{content:"\f44d"}.fa-fly:before{content:"\f417"}.fa-font-awesome-flag:before,.fa-font-awesome-logo-full:before,.fa-font-awesome:before{content:"\f2b4"}.fa-fonticons:before{content:"\f280"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-fort-awesome:before{content:"\f286"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-forumbee:before{content:"\f211"}.fa-foursquare:before{content:"\f180"}.fa-free-code-camp:before{content:"\f2c5"}.fa-freebsd:before{content:"\f3a4"}.fa-fulcrum:before{content:"\f50b"}.fa-galactic-republic:before{content:"\f50c"}.fa-galactic-senate:before{content:"\f50d"}.fa-get-pocket:before{content:"\f265"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-git:before{content:"\f1d3"}.fa-git-alt:before{content:"\f841"}.fa-github:before{content:"\f09b"}.fa-github-alt:before{content:"\f113"}.fa-gitkraken:before{content:"\f3a6"}.fa-gitlab:before{content:"\f296"}.fa-gitter:before{content:"\f426"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-gofore:before{content:"\f3a7"}.fa-golang:before{content:"\e40f"}.fa-goodreads:before{content:"\f3a8"}.fa-goodreads-g:before{content:"\f3a9"}.fa-google:before{content:"\f1a0"}.fa-google-drive:before{content:"\f3aa"}.fa-google-pay:before{content:"\e079"}.fa-google-play:before{content:"\f3ab"}.fa-google-plus:before{content:"\f2b3"}.fa-google-plus-g:before{content:"\f0d5"}.fa-google-wallet:before{content:"\f1ee"}.fa-gratipay:before{content:"\f184"}.fa-grav:before{content:"\f2d6"}.fa-gripfire:before{content:"\f3ac"}.fa-gru
nt:before{content:"\f3ad"}.fa-guilded:before{content:"\e07e"}.fa-gulp:before{content:"\f3ae"}.fa-hacker-news:before{content:"\f1d4"}.fa-hackerrank:before{content:"\f5f7"}.fa-hashnode:before{content:"\e499"}.fa-hips:before{content:"\f452"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-hive:before{content:"\e07f"}.fa-hooli:before{content:"\f427"}.fa-hornbill:before{content:"\f592"}.fa-hotjar:before{content:"\f3b1"}.fa-houzz:before{content:"\f27c"}.fa-html5:before{content:"\f13b"}.fa-hubspot:before{content:"\f3b2"}.fa-ideal:before{content:"\e013"}.fa-imdb:before{content:"\f2d8"}.fa-instagram:before{content:"\f16d"}.fa-instalod:before{content:"\e081"}.fa-intercom:before{content:"\f7af"}.fa-internet-explorer:before{content:"\f26b"}.fa-invision:before{content:"\f7b0"}.fa-ioxhost:before{content:"\f208"}.fa-itch-io:before{content:"\f83a"}.fa-itunes:before{content:"\f3b4"}.fa-itunes-note:before{content:"\f3b5"}.fa-java:before{content:"\f4e4"}.fa-jedi-order:before{content:"\f50e"}.fa-jenkins:before{content:"\f3b6"}.fa-jira:before{content:"\f7b1"}.fa-joget:before{content:"\f3b7"}.fa-joomla:before{content:"\f1aa"}.fa-js:before{content:"\f3b8"}.fa-jsfiddle:before{content:"\f1cc"}.fa-kaggle:before{content:"\f5fa"}.fa-keybase:before{content:"\f4f5"}.fa-keycdn:before{content:"\f3ba"}.fa-kickstarter:before{content:"\f3bb"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-korvue:before{content:"\f42f"}.fa-laravel:before{content:"\f3bd"}.fa-lastfm:before{content:"\f202"}.fa-leanpub:before{content:"\f212"}.fa-less:before{content:"\f41d"}.fa-line:before{content:"\f3c0"}.fa-linkedin:before{content:"\f08c"}.fa-linkedin-in:before{content:"\f0e1"}.fa-linode:before{content:"\f2b8"}.fa-linux:before{content:"\f17c"}.fa-lyft:before{content:"\f3c3"}.fa-magento:before{content:"\f3c4"}.fa-mailchimp:before{content:"\f59e"}.fa-mandalorian:before{content:"\f50f"}.fa-markdown:before{content:"\f60f"}.fa-mastodon:before{content:"\f4f6"}.fa-maxcdn:before{content:"\f136"}.fa-mdb:before{content:"\f8ca"}.fa-medapps:before{content:"\f3c6"}.fa-medium-m:before,.fa-medium:before{content:"\f23a"}.fa-medrt:before{content:"\f3c8"}.fa-meetup:before{content:"\f2e0"}.fa-megaport:before{content:"\f5a3"}.fa-mendeley:before{content:"\f7b3"}.fa-meta:before{content:"\e49b"}.fa-microblog:before{content:"\e01a"}.fa-microsoft:before{content:"\f3ca"}.fa-mix:before{content:"\f3cb"}.fa-mixcloud:before{content:"\f289"}.fa-mixer:before{content:"\e056"}.fa-mizuni:before{content:"\f3cc"}.fa-modx:before{content:"\f285"}.fa-monero:before{content:"\f3d0"}.fa-napster:before{content:"\f3d2"}.fa-neos:before{content:"\f612"}.fa-nfc-directional:before{content:"\e530"}.fa-nfc-symbol:before{content:"\e531"}.fa-nimblr:before{content:"\f5a8"}.fa-node:before{content:"\f419"}.fa-node-js:before{content:"\f3d3"}.fa-npm:before{content:"\f3d4"}.fa-ns8:before{content:"\f3d5"}.fa-nutritionix:before{content:"\f3d6"}.fa-octopus-deploy:before{content:"\e082"}.fa-odnoklassniki:before{content:"\f263"}.fa-old-republic:before{content:"\f510"}.fa-opencart:before{content:"\f23d"}.fa-openid:before{content:"\f19b"}.fa-opera:before{content:"\f26a"}.fa-optin-monster:before{content:"\f23c"}.fa-orcid:before{content:"\f8d2"}.fa-osi:before{content:"\f41a"}.fa-padlet:before{content:"\e4a0"}.fa-page4:before{content:"\f3d7"}.fa-pagelines:before{content:"\f18c"}.fa-palfed:before{content:"\f3d8"}.fa-patreon:before{content:"\f3d9"}.fa-paypal:before{content:"\f1ed"}.fa-perbyte:before{content:"\e083"}.fa-periscope:before{content:"\f3da"}.fa-phabricator:before{content:"\f3db"}.fa-phoenix-framework:before{con
tent:"\f3dc"}.fa-phoenix-squadron:before{content:"\f511"}.fa-php:before{content:"\f457"}.fa-pied-piper:before{content:"\f2ae"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-p:before{content:"\f231"}.fa-pix:before{content:"\e43a"}.fa-playstation:before{content:"\f3df"}.fa-product-hunt:before{content:"\f288"}.fa-pushed:before{content:"\f3e1"}.fa-python:before{content:"\f3e2"}.fa-qq:before{content:"\f1d6"}.fa-quinscape:before{content:"\f459"}.fa-quora:before{content:"\f2c4"}.fa-r-project:before{content:"\f4f7"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-ravelry:before{content:"\f2d9"}.fa-react:before{content:"\f41b"}.fa-reacteurope:before{content:"\f75d"}.fa-readme:before{content:"\f4d5"}.fa-rebel:before{content:"\f1d0"}.fa-red-river:before{content:"\f3e3"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-alien:before{content:"\f281"}.fa-redhat:before{content:"\f7bc"}.fa-renren:before{content:"\f18b"}.fa-replyd:before{content:"\f3e6"}.fa-researchgate:before{content:"\f4f8"}.fa-resolving:before{content:"\f3e7"}.fa-rev:before{content:"\f5b2"}.fa-rocketchat:before{content:"\f3e8"}.fa-rockrms:before{content:"\f3e9"}.fa-rust:before{content:"\e07a"}.fa-safari:before{content:"\f267"}.fa-salesforce:before{content:"\f83b"}.fa-sass:before{content:"\f41e"}.fa-schlix:before{content:"\f3ea"}.fa-screenpal:before{content:"\e570"}.fa-scribd:before{content:"\f28a"}.fa-searchengin:before{content:"\f3eb"}.fa-sellcast:before{content:"\f2da"}.fa-sellsy:before{content:"\f213"}.fa-servicestack:before{content:"\f3ec"}.fa-shirtsinbulk:before{content:"\f214"}.fa-shopify:before{content:"\e057"}.fa-shopware:before{content:"\f5b5"}.fa-simplybuilt:before{content:"\f215"}.fa-sistrix:before{content:"\f3ee"}.fa-sith:before{content:"\f512"}.fa-sitrox:before{content:"\e44a"}.fa-sketch:before{content:"\f7c6"}.fa-skyatlas:before{content:"\f216"}.fa-skype:before{content:"\f17e"}.fa-slack-hash:before,.fa-slack:before{content:"\f198"}.fa-slideshare:before{content:"\f1e7"}.fa-snapchat-ghost:before,.fa-snapchat:before{content:"\f2ab"}.fa-soundcloud:before{content:"\f1be"}.fa-sourcetree:before{content:"\f7d3"}.fa-space-awesome:before{content:"\e5ac"}.fa-speakap:before{content:"\f3f3"}.fa-speaker-deck:before{content:"\f83c"}.fa-spotify:before{content:"\f1bc"}.fa-behance-square:before,.fa-square-behance:before{content:"\f1b5"}.fa-dribbble-square:before,.fa-square-dribbble:before{content:"\f397"}.fa-facebook-square:before,.fa-square-facebook:before{content:"\f082"}.fa-square-font-awesome:before{content:"\e5ad"}.fa-font-awesome-alt:before,.fa-square-font-awesome-stroke:before{content:"\f35c"}.fa-git-square:before,.fa-square-git:before{content:"\f1d2"}.fa-github-square:before,.fa-square-github:before{content:"\f092"}.fa-gitlab-square:before,.fa-square-gitlab:before{content:"\e5ae"}.fa-google-plus-square:before,.fa-square-google-plus:before{content:"\f0d4"}.fa-hacker-news-square:before,.fa-square-hacker-news:before{content:"\f3af"}.fa-instagram-square:before,.fa-square-instagram:before{content:"\e055"}.fa-js-square:before,.fa-square-js:before{content:"\f3b9"}.fa-lastfm-square:before,.fa-square-lastfm:before{content:"\f203"}.fa-odnoklassniki-square:before,.fa-square-odnoklassniki:before{content:"\f264"}.fa-pied-piper-square:before,.fa-square-pied-piper:before{content:"\e01e"}.fa-pinterest-square:before,.fa-square-pinterest:before{content:"\f0d3"}.fa-reddit-square:before,.fa-square-reddit:before{content:"\f1a2"}.fa-snapch
at-square:before,.fa-square-snapchat:before{content:"\f2ad"}.fa-square-steam:before,.fa-steam-square:before{content:"\f1b7"}.fa-square-tumblr:before,.fa-tumblr-square:before{content:"\f174"}.fa-square-twitter:before,.fa-twitter-square:before{content:"\f081"}.fa-square-viadeo:before,.fa-viadeo-square:before{content:"\f2aa"}.fa-square-vimeo:before,.fa-vimeo-square:before{content:"\f194"}.fa-square-whatsapp:before,.fa-whatsapp-square:before{content:"\f40c"}.fa-square-xing:before,.fa-xing-square:before{content:"\f169"}.fa-square-youtube:before,.fa-youtube-square:before{content:"\f431"}.fa-squarespace:before{content:"\f5be"}.fa-stack-exchange:before{content:"\f18d"}.fa-stack-overflow:before{content:"\f16c"}.fa-stackpath:before{content:"\f842"}.fa-staylinked:before{content:"\f3f5"}.fa-steam:before{content:"\f1b6"}.fa-steam-symbol:before{content:"\f3f6"}.fa-sticker-mule:before{content:"\f3f7"}.fa-strava:before{content:"\f428"}.fa-stripe:before{content:"\f429"}.fa-stripe-s:before{content:"\f42a"}.fa-studiovinari:before{content:"\f3f8"}.fa-stumbleupon:before{content:"\f1a4"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-superpowers:before{content:"\f2dd"}.fa-supple:before{content:"\f3f9"}.fa-suse:before{content:"\f7d6"}.fa-swift:before{content:"\f8e1"}.fa-symfony:before{content:"\f83d"}.fa-teamspeak:before{content:"\f4f9"}.fa-telegram-plane:before,.fa-telegram:before{content:"\f2c6"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-the-red-yeti:before{content:"\f69d"}.fa-themeco:before{content:"\f5c6"}.fa-themeisle:before{content:"\f2b2"}.fa-think-peaks:before{content:"\f731"}.fa-tiktok:before{content:"\e07b"}.fa-trade-federation:before{content:"\f513"}.fa-trello:before{content:"\f181"}.fa-tumblr:before{content:"\f173"}.fa-twitch:before{content:"\f1e8"}.fa-twitter:before{content:"\f099"}.fa-typo3:before{content:"\f42b"}.fa-uber:before{content:"\f402"}.fa-ubuntu:before{content:"\f7df"}.fa-uikit:before{content:"\f403"}.fa-umbraco:before{content:"\f8e8"}.fa-uncharted:before{content:"\e084"}.fa-uniregistry:before{content:"\f404"}.fa-unity:before{content:"\e049"}.fa-unsplash:before{content:"\e07c"}.fa-untappd:before{content:"\f405"}.fa-ups:before{content:"\f7e0"}.fa-usb:before{content:"\f287"}.fa-usps:before{content:"\f7e1"}.fa-ussunnah:before{content:"\f407"}.fa-vaadin:before{content:"\f408"}.fa-viacoin:before{content:"\f237"}.fa-viadeo:before{content:"\f2a9"}.fa-viber:before{content:"\f409"}.fa-vimeo:before{content:"\f40a"}.fa-vimeo-v:before{content:"\f27d"}.fa-vine:before{content:"\f1ca"}.fa-vk:before{content:"\f189"}.fa-vnv:before{content:"\f40b"}.fa-vuejs:before{content:"\f41f"}.fa-watchman-monitoring:before{content:"\e087"}.fa-waze:before{content:"\f83f"}.fa-weebly:before{content:"\f5cc"}.fa-weibo:before{content:"\f18a"}.fa-weixin:before{content:"\f1d7"}.fa-whatsapp:before{content:"\f232"}.fa-whmcs:before{content:"\f40d"}.fa-wikipedia-w:before{content:"\f266"}.fa-windows:before{content:"\f17a"}.fa-wirsindhandwerk:before,.fa-wsh:before{content:"\e2d0"}.fa-wix:before{content:"\f5cf"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-wodu:before{content:"\e088"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-wordpress:before{content:"\f19a"}.fa-wordpress-simple:before{content:"\f411"}.fa-wpbeginner:before{content:"\f297"}.fa-wpexplorer:before{content:"\f2de"}.fa-wpforms:before{content:"\f298"}.fa-rendact:before,.fa-wpressr:before{content:"\f3e4"}.fa-xbox:before{content:"\f412"}.fa-xing:before{content:"\f168"}.fa-y-combinator:before{content:"\f23b"}.fa-yahoo:before{content:"\f19e"}.fa-yammer:before{c
ontent:"\f840"}.fa-yandex:before{content:"\f413"}.fa-yandex-international:before{content:"\f414"}.fa-yarn:before{content:"\f7e3"}.fa-yelp:before{content:"\f1e9"}.fa-yoast:before{content:"\f2b1"}.fa-youtube:before{content:"\f167"}.fa-zhihu:before{content:"\f63f"}:host,:root{--fa-font-regular:normal 400 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype")}.fa-regular,.far{font-family:"Font Awesome 6 Free";font-weight:400}:host,:root{--fa-font-solid:normal 900 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}.fa-solid,.fas{font-family:"Font Awesome 6 Free";font-weight:900}@font-face{font-family:"Font Awesome 5 Brands";font-display:block;font-weight:400;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}@font-face{font-family:"Font Awesome 5 Free";font-display:block;font-weight:900;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}@font-face{font-family:"Font Awesome 5 Free";font-display:block;font-weight:400;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype");unicode-range:u+f003,u+f006,u+f014,u+f016-f017,u+f01a-f01b,u+f01d,u+f022,u+f03e,u+f044,u+f046,u+f05c-f05d,u+f06e,u+f070,u+f087-f088,u+f08a,u+f094,u+f096-f097,u+f09d,u+f0a0,u+f0a2,u+f0a4-f0a7,u+f0c5,u+f0c7,u+f0e5-f0e6,u+f0eb,u+f0f6-f0f8,u+f10c,u+f114-f115,u+f118-f11a,u+f11c-f11d,u+f133,u+f147,u+f14e,u+f150-f152,u+f185-f186,u+f18e,u+f190-f192,u+f196,u+f1c1-f1c9,u+f1d9,u+f1db,u+f1e3,u+f1ea,u+f1f7,u+f1f9,u+f20a,u+f247-f248,u+f24a,u+f24d,u+f255-f25b,u+f25d,u+f271-f274,u+f278,u+f27b,u+f28c,u+f28e,u+f29c,u+f2b5,u+f2b7,u+f2ba,u+f2bc,u+f2be,u+f2c0-f2c1,u+f2c3,u+f2d0,u+f2d2,u+f2d4,u+f2dc}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-v4compatibility.woff2) format("woff2"),url(../webfonts/fa-v4compatibility.ttf) format("truetype");unicode-range:u+f041,u+f047,u+f065-f066,u+f07d-f07e,u+f080,u+f08b,u+f08e,u+f090,u+f09a,u+f0ac,u+f0ae,u+f0b2,u+f0d0,u+f0d6,u+f0e4,u+f0ec,u+f10a-f10b,u+f123,u+f13e,u+f148-f149,u+f14c,u+f156,u+f15e,u+f160-f161,u+f163,u+f175-f178,u+f195,u+f1f8,u+f219,u+f27a} \ No newline at end of file diff --git a/_static/vendor/fontawesome/6.1.2/webfonts/fa-brands-400.ttf b/_static/vendor/fontawesome/6.1.2/webfonts/fa-brands-400.ttf deleted file mode 100644 index 24ca8b17c..000000000 Binary files a/_static/vendor/fontawesome/6.1.2/webfonts/fa-brands-400.ttf and /dev/null differ diff --git a/_static/vendor/fontawesome/6.1.2/webfonts/fa-brands-400.woff2 b/_static/vendor/fontawesome/6.1.2/webfonts/fa-brands-400.woff2 deleted file mode 100644 index e67e5cd53..000000000 Binary 
files a/_static/vendor/fontawesome/6.1.2/webfonts/fa-brands-400.woff2 and /dev/null differ
diff --git a/_static/vendor/fontawesome/6.1.2/webfonts/fa-regular-400.ttf b/_static/vendor/fontawesome/6.1.2/webfonts/fa-regular-400.ttf deleted file mode 100644 index c5ac00957..000000000 Binary files a/_static/vendor/fontawesome/6.1.2/webfonts/fa-regular-400.ttf and /dev/null differ
diff --git a/_static/vendor/fontawesome/6.1.2/webfonts/fa-regular-400.woff2 b/_static/vendor/fontawesome/6.1.2/webfonts/fa-regular-400.woff2 deleted file mode 100644 index 7dca1d907..000000000 Binary files a/_static/vendor/fontawesome/6.1.2/webfonts/fa-regular-400.woff2 and /dev/null differ
diff --git a/_static/vendor/fontawesome/6.1.2/webfonts/fa-solid-900.ttf b/_static/vendor/fontawesome/6.1.2/webfonts/fa-solid-900.ttf deleted file mode 100644 index 43ba1cc7d..000000000 Binary files a/_static/vendor/fontawesome/6.1.2/webfonts/fa-solid-900.ttf and /dev/null differ
diff --git a/_static/vendor/fontawesome/6.1.2/webfonts/fa-solid-900.woff2 b/_static/vendor/fontawesome/6.1.2/webfonts/fa-solid-900.woff2 deleted file mode 100644 index 4a7f96652..000000000 Binary files a/_static/vendor/fontawesome/6.1.2/webfonts/fa-solid-900.woff2 and /dev/null differ
diff --git a/_static/vendor/fontawesome/6.1.2/webfonts/fa-v4compatibility.ttf b/_static/vendor/fontawesome/6.1.2/webfonts/fa-v4compatibility.ttf deleted file mode 100644 index 243bc25bd..000000000 Binary files a/_static/vendor/fontawesome/6.1.2/webfonts/fa-v4compatibility.ttf and /dev/null differ
diff --git a/_static/vendor/fontawesome/6.1.2/webfonts/fa-v4compatibility.woff2 b/_static/vendor/fontawesome/6.1.2/webfonts/fa-v4compatibility.woff2 deleted file mode 100644 index e18a16d59..000000000 Binary files a/_static/vendor/fontawesome/6.1.2/webfonts/fa-v4compatibility.woff2 and /dev/null differ
diff --git a/_static/webpack-macros.html b/_static/webpack-macros.html deleted file mode 100644 index 65389d965..000000000 --- a/_static/webpack-macros.html +++ /dev/null @@ -1,30 +0,0 @@
-{# Load FontAwesome icons #}
-{% macro head_pre_icons() %}
-{% endmacro %}
-{% macro head_pre_assets() %}
-{% endmacro %}
-{% macro head_js_preload() %}
-{% endmacro %}
-{% macro body_post() %}
-{% endmacro %}
diff --git a/developer_guide/contribution_guidelines.html b/developer_guide/contribution_guidelines.html deleted file mode 100644 index 4cccd2ef3..000000000 --- a/developer_guide/contribution_guidelines.html +++ /dev/null @@ -1,583 +0,0 @@
MoviePy’s Contribution Guidelines
Communication on GitHub

• Keep messages on GitHub issues and pull requests on-topic and to the point. Be aware that each comment triggers a notification which gets sent out to a number of people.
  • Opinions are OK.
  • For longer or more in-depth discussions, use the MoviePy Gitter. If these discussions lead to a decision, like a merge/reject, please leave a message on the relevant MoviePy issue to document the outcome of the discussion and the reason for the decision.
• Do not push any commit that changes the API without prior discussion.
Preparing for development

• Fork the official MoviePy repository to your own GitHub account: use the “Fork” button in the top right corner of the GitHub interface while viewing the official MoviePy repository.
• Use your fork as the basis for cloning the repository to your local machine: $ git clone URL_TO_YOUR_FORK. You can get the appropriate URL (SSH- or HTTPS-based) from the green “Code” button at the top right of the repository view while looking at your fork. By default, Git refers to any remote you clone from – i.e. in this case your fork on GitHub – as origin.
• Enter your local clone and add the official MoviePy repository as a second remote, with alias upstream: $ git remote add upstream git@github.com:Zulko/moviepy.git (using SSH) or $ git remote add upstream https://github.com/Zulko/moviepy.git (using HTTPS).
• Install the library inside a virtual environment, with all dependencies included, using $ pip install -e ".[optional,doc,test,lint]"
• Configure the pre-commit hooks by running $ pre-commit install
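Put together, a typical first-time setup looks like this (a sketch assuming an SSH remote; YOUR_USER_NAME is a hypothetical placeholder for your GitHub account name):

$ git clone git@github.com:YOUR_USER_NAME/moviepy.git
$ cd moviepy
$ git remote add upstream git@github.com:Zulko/moviepy.git
$ pip install -e ".[optional,doc,test,lint]"
$ pre-commit install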
Coding conventions, code quality

• Respect PEP8 conventions.
• Add just the “right” amount of comments. Try to write self-documenting code with very explicit variable names.
• If you introduce new functionality or fix a bug, document it in the docstring or with code comments.
• MoviePy’s team adopted pre-commit to run code checks using black, flake8 and isort, so make sure that you’ve configured the pre-commit hooks with pre-commit install.
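Once installed, the hooks run on every commit; to run the same checks over the whole code base manually, pre-commit can be invoked directly (a sketch; the exact checks are defined by the repository’s pre-commit configuration):

$ pre-commit run --all-files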

Standard contribution workflow
Local development

• Keep your local master branch up-to-date with the official repo’s master by periodically fetching/pulling it: $ git pull upstream master
• Never make changes on master directly, but branch off into separate development branches: $ git checkout -b YOUR_DEVELOP_BRANCH. Ideally, these are given names which function as keywords for what you are working on, prefixed with fix_ (for bug fixes), feature_ or something similarly appropriate and descriptive.
• Base any changes you submit on the most recent master.
More detailed explanation of the last point:

It is likely that the official repo’s master branch will move on (get updated, have other PRs merged into it) while you are working on your changes. Before creating a pull request, you will have to make sure your changes are not based on outdated code. For this reason, it makes sense to avoid falling “too far behind” while developing, by rebasing your local master branch at intervals. Make sure your master branch is in sync with the official master branch (as per the first point), then, while checked out on your development branch, run: $ git rebase master

If you haven’t rebased before, make sure to familiarise yourself with the concept; the sketch below shows the full sequence.
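A minimal sketch of the whole sync-and-rebase cycle (YOUR_DEVELOP_BRANCH is a placeholder for your own branch name):

$ git checkout master
$ git pull upstream master
$ git checkout YOUR_DEVELOP_BRANCH
$ git rebase master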

Submitting Pull Requests

You do not have to have finished your feature or bug fix before submitting a PR; just mention that it is still a work in progress.

Before submitting PRs:

• Run the test suite over your code to expose any problems: $ pytest
• Push your local development branch to your GitHub fork: $ git push origin YOUR_DEVELOP_BRANCH
When you now look at your forked repo on your GitHub account, you will see GitHub suggest branches for sending pull requests to the official Zulko/moviepy repository.

Once you open a PR, you will be presented with a template which you are asked to fill out. You are encouraged to add any additional information which helps provide further context to your changes, and to link to any issues or PRs which your pull request references or is informed by.

On submitting your PR, an automated test suite runs over your submission, which might take a few minutes to complete. In a next step, a MoviePy maintainer will review your code and, if necessary, help you get it merge-ready.
diff --git a/developer_guide/developers_install.html b/developer_guide/developers_install.html deleted file mode 100644 index 9e672ff5a..000000000 --- a/developer_guide/developers_install.html +++ /dev/null @@ -1,553 +0,0 @@
Installation for MoviePy developers

Warning

This part is only intended for people who want to build the MoviePy documentation themselves, or to contribute to MoviePy; normal users don’t need it.

In addition to MoviePy’s main libraries, MoviePy developers will also need to install additional libraries to be able to run the MoviePy tests and build the MoviePy documentation.
Libraries for documentation

You can install the libraries required to build the documentation with:

$ (sudo) pip install moviepy[doc]

Once the libraries are installed, you can build the documentation with:

$ python setup.py build_docs
Libraries for testing and linting

You can install the libraries required for testing and linting with:

$ (sudo) pip install moviepy[test]
$ (sudo) pip install moviepy[lint]

Once the libraries are installed, you can run the tests with:

$ python -m pytest

And you can lint with:

$ python -m black .
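black is only one of the code checks; the other linters named in the contribution guidelines (flake8 and isort) can be invoked the same way (a sketch assuming the moviepy and tests directories at the repo root; the exact options come from the repository’s configuration files):

$ python -m flake8 moviepy tests
$ python -m isort moviepy tests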
diff --git a/developer_guide/index.html b/developer_guide/index.html deleted file mode 100644 index 47435b6ae..000000000 --- a/developer_guide/index.html +++ /dev/null @@ -1,514 +0,0 @@
The MoviePy Developers Guide — MoviePy documentation
diff --git a/genindex.html b/genindex.html deleted file mode 100644 index 1f8cb6292..000000000 --- a/genindex.html +++ /dev/null @@ -1,2000 +0,0 @@
Index

M
• moviepy.audio.tools.cuts
• moviepy.Clip
• moviepy.config
• moviepy.decorators
• moviepy.Effect
• moviepy.tools
• moviepy.video
• moviepy.video.compositing
• moviepy.video.compositing.CompositeVideoClip
• moviepy.video.fx
• moviepy.video.fx.AccelDecel
• moviepy.video.fx.BlackAndWhite
• moviepy.video.fx.Blink
• moviepy.video.fx.Crop
• moviepy.video.fx.CrossFadeIn
• moviepy.video.fx.CrossFadeOut
• moviepy.video.fx.EvenSize
• moviepy.video.fx.FadeIn
• moviepy.video.fx.FadeOut
• moviepy.video.fx.Freeze
• moviepy.video.fx.FreezeRegion
• moviepy.video.fx.GammaCorrection
• moviepy.video.fx.HeadBlur
• moviepy.video.fx.InvertColors
• moviepy.video.fx.Loop
• moviepy.video.fx.LumContrast
• moviepy.video.fx.MakeLoopable
• moviepy.video.fx.Margin
• moviepy.video.fx.MaskColor
• moviepy.video.fx.MasksAnd
• moviepy.video.fx.MasksOr
• moviepy.video.fx.MirrorX
• moviepy.video.fx.MirrorY
• moviepy.video.fx.MultiplyColor
• moviepy.video.fx.MultiplySpeed
• moviepy.video.fx.Painting
• moviepy.video.fx.Resize
• moviepy.video.fx.Rotate
• moviepy.video.fx.Scroll
• moviepy.video.fx.SlideIn
• moviepy.video.fx.SlideOut
• moviepy.video.fx.SuperSample
• moviepy.video.fx.TimeMirror
• moviepy.video.fx.TimeSymmetrize
• moviepy.video.io
• moviepy.video.io.display_in_notebook
• moviepy.video.io.ffmpeg_reader
• moviepy.video.io.ffmpeg_tools
• moviepy.video.io.ffmpeg_writer
• moviepy.video.io.ffplay_previewer
• moviepy.video.io.gif_writers
• moviepy.video.io.ImageSequenceClip
• moviepy.video.io.VideoFileClip
• moviepy.video.tools
• moviepy.video.tools.credits
• moviepy.video.tools.cuts
• moviepy.video.tools.drawing
• moviepy.video.tools.interpolators
• moviepy.video.tools.subtitles
• moviepy.video.VideoClip
• MultiplyColor (class in moviepy.video.fx.MultiplyColor)
• MultiplySpeed (class in moviepy.video.fx.MultiplySpeed)
• MultiplyStereoVolume (class in moviepy.audio.fx.MultiplyStereoVolume)
• MultiplyVolume (class in moviepy.audio.fx.MultiplyVolume)
diff --git a/getting_started/FAQ.html b/getting_started/FAQ.html deleted file mode 100644 index e8eb483f5..000000000 --- a/getting_started/FAQ.html +++ /dev/null @@ -1,552 +0,0 @@
FAQ and troubleshooting

This section intends to answer the most common questions and errors.

Common errors that are not bugs

These are very common errors which are not considered bugs to be solved (but you can still ask for this to change). If these answers don’t work for you, please open a bug report on GitHub, or ask on the dedicated forum on Reddit.
MoviePy generated a video that cannot be read by my favorite player

Known reason: one of the video’s dimensions was not even, for instance 720x405, and you used an MPEG4 codec like libx264 (the default in MoviePy). In this case the generated video uses a format that is readable only by some players, like VLC.
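If you already have such a file, one workaround is to rescale it to even dimensions with ffmpeg directly (a sketch using ffmpeg rather than MoviePy; the file names are hypothetical):

$ ffmpeg -i input.mp4 -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" -c:a copy output.mp4

When producing the file with MoviePy in the first place, resizing or cropping the clip to even dimensions before writing avoids the problem; the moviepy.video.fx.EvenSize effect listed in the API index targets exactly this case.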

-
-
-

I can’t seem to read any video with MoviePy#

-

Known reason: you have an outdated version of FFMPEG. Install a recent version from the official website, not from your OS’s repositories (see Installation).

-
-
-

Previewing videos makes them slower than they are#

-

It means that your computer is not good enough to render the clip in real time. Don’t hesitate to play with the options of preview: for instance, lower the fps of the sound (11000 Hz is still fine) and of the video. Also, downsizing your video with resize can help.
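For instance, a sketch of a lighter preview (the parameter values are illustrative):

from moviepy import VideoFileClip

clip = VideoFileClip("example.mp4")
# Halve the resolution and lower the preview fps to ease the load on the machine
clip.resized(0.5).preview(fps=10)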

-
-
-
\ No newline at end of file
diff --git a/getting_started/docker.html b/getting_started/docker.html
deleted file mode 100644
index 18d4b161d..000000000
--- a/getting_started/docker.html
+++ /dev/null
@@ -1,566 +0,0 @@
MoviePy Docker#

-
-

Prerequisites#

-

Docker installed (Docker for Mac, Docker for Windows, Linux, etc.)

-
-
-

Build the Docker image#

-
1. Move into the moviepy root dir.
2. Build the Dockerfile:

   docker build -t moviepy -f Dockerfile .
-
-

How to run the unit tests from Docker#

-

Run pytest inside the container with the following command:

-
docker run -w /moviepy -it moviepy python -m pytest
-
-
-
-
-

Running your own moviepy script from docker#

-

Change directory to where your script is located

-

If the moviepy Docker container is already running, you can connect to it with:

-
docker exec -it moviepy python myscript.py
-
-
-

If the container isn’t already running, start one and run your script inside it:

-
docker run -it moviepy bash
-python myscript.py
-
-
-

You can also start a container and run a script in one command:

-
docker run -it -v `pwd`:/code moviepy python myscript.py
-
-
-
-
\ No newline at end of file
diff --git a/getting_started/index.html b/getting_started/index.html
deleted file mode 100644
index 75ff35b4e..000000000
--- a/getting_started/index.html
+++ /dev/null
@@ -1,522 +0,0 @@
\ No newline at end of file
diff --git a/getting_started/install.html b/getting_started/install.html
deleted file mode 100644
index de785ce8b..000000000
--- a/getting_started/install.html
+++ /dev/null
@@ -1,587 +0,0 @@
Installation#

-

Installation is done with pip. If you don’t have pip, take a look at how to install it.

-

With pip installed, just type this in a terminal:

-
$ (sudo) pip install moviepy
-
-
-
-

Installation of additional binaries#

-

MoviePy depends on the software ffmpeg for video reading and writing and on ffplay for video previewing.

-

You don’t need to worry about ffmpeg, as it should be automatically downloaded/installed by ImageIO during your first use of MoviePy (it takes a few seconds).

-

You do need to worry about ffplay if you plan on using video/audio previewing, though. In that case, make sure ffplay is installed (it can usually be found alongside ffmpeg) and accessible to Python, or look at how to set a custom path (see below).

-
-

Define custom paths to binaries#

-

If you want to use a specific version of FFMPEG and FFPLAY, you can do so using environment variables.

-

There are a couple of environment variables used by MoviePy that allow you to configure custom paths to the external tools.

-

To set any of these variables, the easiest way is to do it in Python before importing objects from MoviePy. For example:

-
import os
-os.environ["FFMPEG_BINARY"] = "/path/to/custom/ffmpeg"
-os.environ["FFPLAY_BINARY"] = "/path/to/custom/ffplay"
-
-
-

Alternatively, after installing the optional dependencies, you can create a .env file in your working directory that will be read automatically. For example:

-
FFMPEG_BINARY=/path/to/custom/ffmpeg
-FFPLAY_BINARY=/path/to/custom/ffplay
-
-
-
-

Environment variables#

-

There are two available environment variables for external binaries:

-
-
FFMPEG_BINARY

Normally you can leave it at its default (‘ffmpeg-imageio’), in which case imageio will download the right ffmpeg binary (on first use) and then always use that binary.

-

The second option is "auto-detect". In this case ffmpeg will be whatever binary is found on the computer: generally ffmpeg (on Linux/macOS) or ffmpeg.exe (on Windows).

-

Lastly, you can set it to use a binary at a specific location on your disk by specifying the exact path.

-
-
FFPLAY_BINARY

The default is "auto-detect". MoviePy will try to find and use the installed ffplay binary.

-

You can set it to use a binary at a specific location on your disk. On Windows, this might look like:

-
os.environ["FFPLAY_BINARY"] = r"C:\Program Files\ffmpeg\ffplay.exe"
-
-
-
-
-
-
-

Verify that MoviePy finds the binaries#

-

To test if FFmpeg and FFplay are found by MoviePy, in a Python console, you can run:

-
>>> from moviepy.config import check
->>> check()
-
-
-
-
-
-
\ No newline at end of file
diff --git a/getting_started/moviepy_10_minutes.html b/getting_started/moviepy_10_minutes.html
deleted file mode 100644
index 33d19ed11..000000000
--- a/getting_started/moviepy_10_minutes.html
+++ /dev/null
@@ -1,1013 +0,0 @@

MoviePy in 10 Minutes: Creating a Trailer from “Big Buck Bunny”#

-
-

Note

-

This tutorial aims to be a simple and short introduction for new users wishing to use MoviePy. For a more in-depth exploration of the concepts seen in this tutorial, see The MoviePy User Guide.

-
-

In this tutorial, you will learn the basics of how to use the MoviePy library in just 10 minutes. As an example project for this tutorial, we will create the following trailer for the movie “Big Buck Bunny”.

-
- -
-

Prerequisites#

-

Before we start, make sure you have MoviePy installed. You can install it using pip:

-
pip install moviepy
-
-
-

Also, we will need to gather a few resources such as the original movie, font files, images, etc. To make it easy, we have prepared a template project you can download directly:

-
1. Download the project template and unzip it.
2. Take a look at the resources inside the folder to familiarize yourself.
3. Create a Python script file named trailer.py in the project directory.

Now, you are ready to proceed to the next steps.

-
-
-

Step 1: Import MoviePy and Load the Video#

-

Let’s start by importing the necessary modules and loading the “Big Buck Bunny” video into our Python program:

-
# Let's import moviepy; let's also import numpy, we will use it at some point
-from moviepy import *
-import numpy as np
-
-
-#################
-# VIDEO LOADING #
-#################
-# We load our video
-video = VideoFileClip("./resources/bbb.mp4")
-
-
-

As you see, loading a video file is really easy, but MoviePy isn’t limited to video. It can handle images, audio, texts, and even custom animations.

-

No matter the kind of resource, ultimately any clip will be either a VideoClip for any visual element or an AudioClip for any audio element.

-

In this tutorial, we will only see a few of those, but if you want to explore more, you can find an exhaustive list in the user guide about Loading resources as clips.

-
-
-

Step 2: Extract the Best Scenes#

-

To create our trailer, we will focus on presenting the main characters, so we need to extract parts of the movie. This is a very classic task, so let’s turn our main clip into multiple subclips:

-
#####################
-# SCENES EXTRACTION #
-#####################
-# We extract the scenes we want to use
-
-# First the characters
-intro_clip = video.with_subclip(1, 11)
-bird_clip = video.with_subclip(16, 20)
-bunny_clip = video.with_subclip(37, 55)
-rodents_clip = video.with_subclip(
-    "00:03:34.75", "00:03:56"
-)  # we can also use string notation with format HH:MM:SS.uS
-rambo_clip = video.with_subclip("04:41.5", "04:44.70")
-
-
-

Here, we use the with_subclip method to extract specific scenes from the main video. We provide the start and end times (in seconds or as text with the format HH:MM:SS.µS) for each scene. -The extracted clips are stored in their respective variables (intro_clip, bird_clip, etc.).

-
-
-

Step 3: Take a First Look with Preview#

-

When editing videos, it’s often essential to preview the clips to ensure they meet our vision. This allows you to watch the segment you’re working on and make any necessary adjustments for the perfect result.

-

To do so using MoviePy, you can utilize the preview() function available for each clip (the complementary audio_preview() is also available for AudioClip).

-
-

Note

-

Note that you will need ffplay installed and accessible to MoviePy for preview to work. You can check if ffplay is available by running the command python3 -c "from moviepy.config import check;check()". -If not, please see Installation of additional binaries.

-
-
#####################
-# SCENES PREVIEWING #
-#####################
-# Now, lets have a first look at our clips
-# Warning: you need ffplay installed for preview to work
-# We set a low fps so our machine can render in real time without slowing down
-intro_clip.preview(fps=20)
-bird_clip.preview(fps=20)
-bunny_clip.preview(fps=20)
-rodents_clip.preview(fps=20)
-rambo_clip.preview(fps=20)
-
-
-

By using the preview, you may have noticed that our clips not only contain video but also audio. This is because when loading a video, you not only load the image but also the audio tracks that are turned into AudioClip and -added to your video clip.

-
-

Note

-

When previewing, you may encounter video slowing or video/audio shifting. This is not a bug; it’s due to the fact that your computer cannot render the preview in real-time. -In such a case, the best course of action is to set the fps parameter for the preview() at a lower value to make things easier on your machine.

-
-
-
-

Step 4: Modify a Clip by Cutting Out a Part of It#

-

After previewing the clips, we notice that the rodents’ scene is a bit long. It would be nice to remove the parts of the scene that we don’t need; this is quite a common task in video editing. To do so, we are going to use the with_cutout method to remove a portion of the clip between 00:06:00 and 00:10:00.

-
##############################
-# CLIPS MODIFICATION CUTTING #
-##############################
-# Well, looking at the rodents scene, it is a bit long, isn't it?
-# Let's see how we can modify the clip with one of the many clip manipulation methods starting with with_*,
-# in that case by removing the part of the clip between 00:06:00 and 00:10:00, using with_cutout
-rodents_clip = rodents_clip.with_cutout(start_time=4, end_time=10)
-
-# Note: You may have noticed that we have reassigned rodents_clip; this is because all with_* methods return a modified *copy* of the
-# original clip instead of modifying it directly. In MoviePy any function starting with with_* is out-of-place instead of in-place,
-# meaning it does not modify the original data, but instead copies it and modifies/returns the copy
-
-# Let's check the result
-rodents_clip.preview(fps=10)
-
-
-

In that particular case, we have used with_cutout, but it is only one of the many clip manipulation methods starting with with_*. We will see a few others in this tutorial, but many more exist. For an exhaustive list, see the Api Reference.

-
-

Note

-

You may have noticed that we have reassigned the rodents_clip variable instead of just calling a method on it. -This is because in MoviePy, any function starting with with_* is out-of-place instead of in-place, meaning it does not modify the original data but instead copies it and modifies/returns the copy. -So you need to store the result of the method and, if necessary, reassign the original variable to update your clip.
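As a minimal illustration of this out-of-place behavior (reusing the tutorial’s rodents_clip; the printed comparison is just for demonstration):

shorter = rodents_clip.with_cutout(start_time=4, end_time=10)  # returns a modified copy
print(shorter.duration < rodents_clip.duration)  # True: the original clip is untouched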

-
-
-
-

Step 5: Creating Text/Logo Clips#

-

In addition to videos, we often need to work with images and texts. MoviePy offers some specialized kinds of VideoClip specifically for that purpose: ImageClip and TextClip.

-

In our case, we want to create text clips to add text overlays between the video clips. We’ll define the font, text content, font size, and color for each text clip. -We also want to create image clips for the “Big Buck Bunny” logo and the “Made with MoviePy” logo and resize them as needed.

-
############################
-# TEXT/LOGO CLIPS CREATION #
-############################
-# Lets create the texts to put between our clips
-font = "./resources/font/font.ttf"
-intro_text = TextClip(
-    font=font,
-    text="The Blender Foundation and\nPeach Project presents",
-    font_size=50,
-    color="#fff",
-    text_align="center",
-)
-bird_text = TextClip(font=font, text="An unlucky bird", font_size=50, color="#fff")
-bunny_text = TextClip(
-    font=font, text="A (slightly overweight) bunny", font_size=50, color="#fff"
-)
-rodents_text = TextClip(
-    font=font, text="And three rodent pests", font_size=50, color="#fff"
-)
-revenge_text = TextClip(
-    font=font, text="Revenge is coming...", font_size=50, color="#fff"
-)
-made_with_text = TextClip(font=font, text="Made with", font_size=50, color="#fff")
-
-# We will also need the big buck bunny logo, so lets load it and resize it
-logo_clip = ImageClip("./resources/logo_bbb.png").resized(width=400)
-moviepy_clip = ImageClip("./resources/logo_moviepy.png").resized(width=300)
-
-
-

As you can see, ImageClip is quite simple, but TextClip is a rather complicated object. Don’t hesitate to explore the arguments it accepts.

-
-

Note

-

In our example, we have used the resized() method to resize our image clips. This method works just like any with_* method, but because resizing is such a common -task, the name has been shortened to resized(). The same is true for cropped() and rotated().

-
-

Feel free to experiment with different effects and transitions to achieve the desired trailer effect.

-
-
-

Step 6: Timing the clips#

-

We have all the clips we need, but if we were to combine all those clips into a single one using composition (we will see that in the next step), all our clips would start at the same time and play on top of each other, which is obviously not what we want. -Also, some video clips, like the images and texts, have no endpoint/duration at creation (except if you have provided a duration parameter), which means trying to render them will throw an error as it would result in an infinite video.

-

To fix that, we need to specify when a clip should start and stop in the final clip. So, let’s start by indicating when each clip must start and end with the appropriate with_* methods.

-
################
-# CLIPS TIMING #
-################
# We have all the clips we need, but if we were to turn all those clips into a single one with composition (we will see that during next step)
-# all our clips would start at the same time and play on top of each other, which is obviously not what we want.
-# To fix that, we need to say when a clip should start and stop in the final clip.
-# So, let's start by telling when each clip must start and end with the appropriate with_* methods
-intro_text = intro_text.with_duration(6).with_start(
-    3
-)  # Intro for 6 seconds, start after 3 seconds
-logo_clip = logo_clip.with_start(intro_text.start + 2).with_end(
-    intro_text.end
-)  # Logo starts 2 seconds after the intro text and stops with it
-bird_clip = bird_clip.with_start(
-    intro_clip.end
-)  # Make bird clip start after intro, duration already known
-bird_text = bird_text.with_start(bird_clip.start).with_end(
-    bird_clip.end
-)  # Make the text synchronized with the clip
-bunny_clip = bunny_clip.with_start(bird_clip.end)  # Make bunny clip follow bird clip
-bunny_text = bunny_text.with_start(bunny_clip.start + 2).with_duration(7)
-rodents_clip = rodents_clip.with_start(bunny_clip.end)
-rodents_text = rodents_text.with_start(rodents_clip.start).with_duration(4)
-rambo_clip = rambo_clip.with_start(rodents_clip.end - 1.5)
-revenge_text = revenge_text.with_start(rambo_clip.start + 1.5).with_duration(4)
-made_with_text = made_with_text.with_start(rambo_clip.end).with_duration(3)
-moviepy_clip = moviepy_clip.with_start(made_with_text.start).with_duration(3)
-
-
-
-

Note

-

By default, all clips have a start point at 0. If a clip has no duration but you set the end time, then the duration will be calculated for you. The reverse is also true.

-

So in our case, we either use duration or end time, depending on what is more practical for each specific case.
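A quick sketch of that reciprocity (the times are illustrative, reusing a clip from the tutorial):

clip = bird_text.with_start(3).with_end(8)       # duration is computed as 8 - 3 = 5
clip = bird_text.with_start(3).with_duration(5)  # end is computed as 3 + 5 = 8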

-
-
-
-

Step 7: Seeing how all clips combine#

-

Now that all our clips are timed, let’s get a first idea of how our final clip will look. In video editing, the act of assembling multiple videos into a single one is known as composition. -So, MoviePy offers a special kind of VideoClip dedicated to the act of combining multiple clips into one, the CompositeVideoClip.

-

CompositeVideoClip takes an array of clips as input and will play them on top of each other at render time, starting and stopping each clip at its start and end points.

-
-

Note

-

If possible, CompositeVideoClip will extract endpoint and size from the biggest/last ending clip. If a clip in the list has no duration, then you will have to manually set the duration of CompositeVideoClip before rendering.

-
-
########################
-# CLIPS TIMING PREVIEW #
-########################
-# Let's make a first composite of those clips into one single clip and do a quick preview to see if everything is in sync
-
-quick_compo = CompositeVideoClip(
-    [
-        intro_clip,
-        intro_text,
-        logo_clip,
-        bird_clip,
-        bird_text,
-        bunny_clip,
-        bunny_text,
-        rodents_clip,
-        rodents_text,
-        rambo_clip,
-        revenge_text,
-        made_with_text,
-        moviepy_clip,
-    ]
-)
-quick_compo.preview(fps=10)
-
-
-
-
-

Step 8: Positioning our clips#

-

By looking at this first preview, we see that our clips are pretty well timed, but that the positions of our texts and logo are not satisfying.

-

This is because, for now, we have only specified when our clips should appear, and not the position at which they should appear. By default, all clips are positioned from the top left of the video, at (0, 0).

-

All our clips do not have the same sizes (the texts and images are smaller than the videos), and the CompositeVideoClip takes the size of the biggest clip (so in our case, the size of the videos), -so the texts and images are all in the top left portion of the clip.

-

To fix this, we simply have to define the position of our clips in the composition with the method with_position.

-
#####################
-# CLIPS POSITIONING #
-#####################
-# Now that we have set the timing of our different clips, we need to make sure they are in the right positions
-# We will keep things simple and almost always use ("center", "center") for the texts
-bird_text = bird_text.with_position(("center", "center"))
-bunny_text = bunny_text.with_position(("center", "center"))
-rodents_text = rodents_text.with_position(("center", "center"))
-revenge_text = revenge_text.with_position(("center", "center"))
-
-# For the logos and intro/end, we will use pixel position instead of center
-top = intro_clip.h // 2
-intro_text = intro_text.with_position(("center", 200))
-logo_clip = logo_clip.with_position(("center", top))
-made_with_text = made_with_text.with_position(("center", 300))
-moviepy_clip = moviepy_clip.with_position(("center", 360))
-
-# Lets take another look to check positions
-quick_compo = CompositeVideoClip(
-    [
-        intro_clip,
-        intro_text,
-        logo_clip,
-        bird_clip,
-        bird_text,
-        bunny_clip,
-        bunny_text,
-        rodents_clip,
-        rodents_text,
-        rambo_clip,
-        revenge_text,
-        made_with_text,
-        moviepy_clip,
-    ]
-)
-quick_compo.preview(fps=10)
-
-
-
-

Note

-

The position is a tuple with horizontal and vertical position. You can give them as pixels, as strings (top, left, right, bottom, center), and even as a percentage by providing -a float and passing the argument relative=True.
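For instance (the values are illustrative):

bird_text = bird_text.with_position(("center", "top"))          # strings
bird_text = bird_text.with_position((100, 360))                 # pixels
bird_text = bird_text.with_position((0.4, 0.7), relative=True)  # fractions of the frame size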

-
-

Now, all our clips are in the right place and timed as expected.

-
-
-

Step 9: Adding transitions and effects#

-

So, our clips are timed and placed, but for now, the result is quite raw. It would be nice to have smoother transitions between the clips. -In MoviePy, this is achieved through the use of effects.

-

Effects play a crucial role in enhancing the visual and auditory appeal of your video clips. Effects are applied to clips to create transitions, transformations, or modifications, resulting in better-looking videos. -Whether you want to add smooth transitions between clips, alter visual appearance, or manipulate audio properties, MoviePy comes with many existing effects to help you bring your creative vision to life with ease.

-

You can find these effects under the namespace vfx for video effects and afx for audio effects.

-
-

Note

-

You can use audio effects on both audio and video clips because when applying audio effects to a video clip, the effect will actually be applied to the video clip’s embedded audio clip instead.
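For instance, a small sketch using afx.MultiplyVolume (listed in this documentation’s module index; the factor is illustrative):

# Applied to a video clip, the audio effect modifies its embedded audio track
bunny_clip = bunny_clip.with_effects([afx.MultiplyVolume(0.5)])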

-
-

Using an effect is very simple. You just have to call the method with_effects on your clip and pass an array of effect objects to apply.

-

In our case, we will add simple fade-in/out and cross-fade-in/out transitions between our clips, as well as slow down the rambo_clip.

-
################################
-# CLIPS TRANSITION AND EFFECTS #
-################################
# Now that our clips are timed and positioned, let's add some transitions to make it more natural
-# To do so we use the with_effects method and the video effects in vfx
-# We call with_effects on our clip and pass it an array of effect objects to apply
-# We'll keep it simple, nothing fancy, just cross-fading
-intro_text = intro_text.with_effects([vfx.CrossFadeIn(1), vfx.CrossFadeOut(1)])
-logo_clip = logo_clip.with_effects([vfx.CrossFadeIn(1), vfx.CrossFadeOut(1)])
-bird_text = bird_text.with_effects([vfx.CrossFadeIn(0.5), vfx.CrossFadeOut(0.5)])
-bunny_text = bunny_text.with_effects([vfx.CrossFadeIn(0.5), vfx.CrossFadeOut(0.5)])
-rodents_text = rodents_text.with_effects([vfx.CrossFadeIn(0.5), vfx.CrossFadeOut(0.5)])
-
-# Also add cross fading on video clips and video clips audio
-# See how video effects are under vfx and audio ones under afx
-intro_clip = intro_clip.with_effects(
-    [vfx.FadeIn(1), vfx.FadeOut(1), afx.AudioFadeIn(1), afx.AudioFadeOut(1)]
-)
-bird_clip = bird_clip.with_effects(
-    [vfx.FadeIn(1), vfx.FadeOut(1), afx.AudioFadeIn(1), afx.AudioFadeOut(1)]
-)
-bunny_clip = bunny_clip.with_effects(
-    [vfx.FadeIn(1), vfx.FadeOut(1), afx.AudioFadeIn(1), afx.AudioFadeOut(1)]
-)
-rodents_clip = rodents_clip.with_effects(
-    [vfx.FadeIn(1), vfx.CrossFadeOut(1.5), afx.AudioFadeIn(1), afx.AudioFadeOut(1.5)]
-)  # Just fade in, rambo clip will do the cross fade
-rambo_clip = rambo_clip.with_effects(
-    [vfx.CrossFadeIn(1.5), vfx.FadeOut(1), afx.AudioFadeIn(1.5), afx.AudioFadeOut(1)]
-)
-
# Effects are not only for transitions; they can also change a clip's timing or appearance
-# To show that, let's also modify the Rambo-like part of our clip to be in slow motion
-# PS: We do it with an effect, but this is one of the few effects that have a direct shortcut, with_multiply_speed;
-# the others are with_multiply_volume, resized, cropped and rotated
-rambo_clip = rambo_clip.with_effects([vfx.MultiplySpeed(0.5)])
-
-# Because we modified the timing of rambo_clip with our MultiplySpeed effect, we must reassign the following clips' timing
-made_with_text = made_with_text.with_start(rambo_clip.end).with_duration(3)
-moviepy_clip = moviepy_clip.with_start(made_with_text.start).with_duration(3)
-
-# Let's have a last look at the result to make sure everything is working as expected
-quick_comp = CompositeVideoClip(
-    [
-        intro_clip,
-        intro_text,
-        logo_clip,
-        bird_clip,
-        bird_text,
-        bunny_clip,
-        bunny_text,
-        rodents_clip,
-        rodents_text,
-        rambo_clip,
-        revenge_text,
-        made_with_text,
-        moviepy_clip,
-    ]
-)
-quick_comp.preview(fps=10)
-
-
-

Well, this looks a lot nicer! For this tutorial, we want to keep things simple, so we mostly used transitions. However, you can find many different effects and even create your own. -For a more in-depth presentation, see moviepy.video.fx, moviepy.audio.fx, and Creating your own effects.

-
-

Note

-

Looking at the result, you may notice that crossfading makes clips go from transparent to opaque, and back again, and wonder how it works.

-

We won’t get into details, but know that in MoviePy, you can declare some sections of a video clip to be transparent by using masks. Masks are nothing more than -special kinds of video clips that are made of values ranging from 0 for a transparent pixel to 1 for a fully opaque one.

-

For more info, see Mask clips.

-
-
-
-

Step 10: Modifying the appearance of a clip using filters#

-

Finally, to make it more epic, we will apply a custom filter to our Rambo clip to make the image sepia. -MoviePy does not come with a sepia effect out of the box, and creating a full custom effect is beyond the scope of this tutorial. However, we will see how we can apply a simple filter to our clip using the image_transform method.

-

To understand how filters work, you first need to understand that in MoviePy, a clip frame is nothing more than a numpy ndarray of shape HxWx3. -This means we can modify how a frame looks like by applying simple math operations. Doing that on all the frames allows us to apply a filter to our clip!

-

The “apply to all frames” part is done by the image_transform method. This method takes a callback function as an argument, and at render time, it will trigger the callback for each frame of the clip, passing the current frame.

-
-

Warning

-

This is a bit of an advanced usage, and the example involves matrix multiplication. If this is too much for you, you can simply ignore it until you really need to make custom filters, -then go look for a more detailed explanation on how to do filtering (Modify a clip apparence and timing using filters) and create custom effects (Creating your own effects) in the user guide.

-

What you need to remember is just that we can apply filters on images. Here we do it mathematically, but you could very well use a library such as Pillow (provided it can understand numpy images) to do the maths for you!

-
-
###############
-# CLIP FILTER #
-###############
# Let's finish by modifying our rambo clip to make it sepia
-
-
-# We will start by defining a function that turns a numpy image into sepia
-# It takes the image as a numpy array as input and returns the modified image as output
-def sepia_filter(frame: np.ndarray):
-    # Sepia filter transformation matrix
-    # Sepia transform works by applying to each pixel of the image the following rules
-    # res_R = (R * .393) + (G *.769) + (B * .189)
-    # res_G = (R * .349) + (G *.686) + (B * .168)
-    # res_B = (R * .272) + (G *.534) + (B * .131)
-    #
-    # With numpy we can do that very efficiently by multiplying the image matrix by a transformation matrix
-    sepia_matrix = np.array(
-        [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]
-    )
-
-    # Convert the image to float32 format for matrix multiplication
-    frame = frame.astype(np.float32)
-
-    # Apply the sepia transformation
-    # .T is needed so that each output channel is the dot product of the input
-    # pixel (R, G, B) with the corresponding row of sepia_matrix
-    sepia_image = np.dot(frame, sepia_matrix.T)
-
-    # Because final result can be > 255, we limit the result to range [0, 255]
-    sepia_image = np.clip(sepia_image, 0, 255)
-
-    # Convert the image back to uint8 format, because we need integer not float
-    sepia_image = sepia_image.astype(np.uint8)
-
-    return sepia_image
-
-
# Now, we simply apply the filter to our clip by calling image_transform, which will call our filter on every frame
-rambo_clip = rambo_clip.image_transform(sepia_filter)
-
-# Let's see how our filter looks
-rambo_clip.preview(fps=10)
-
-
-
-
-

Step 11: Rendering the final clip to a file#

-

So, our final clip is ready, and we have made all the cutting and modifications we want. We are now ready to save the final result into a file. In video editing, this operation -is known as rendering.

-

Again, we will keep things simple and just do video rendering without much tweaking. In most cases, MoviePy and FFMPEG automatically find the best settings. Take a look at the write_videofile doc for more info.

-
##################
-# CLIP RENDERING #
-##################
-# Everything is good and ready, we can finally render our clip into a file
-final_clip = CompositeVideoClip(
-    [
-        intro_clip,
-        intro_text,
-        logo_clip,
-        bird_clip,
-        bird_text,
-        bunny_clip,
-        bunny_text,
-        rodents_clip,
-        rodents_text,
-        rambo_clip,
-        revenge_text,
-        made_with_text,
-        moviepy_clip,
-    ]
-)
-final_clip.write_videofile("./result.mp4")
-
-
-
-
-

Conclusion#

-

Congratulations! You have successfully created a trailer for the movie “Big Buck Bunny” using the MoviePy library. This tutorial covered the basics of MoviePy, including loading videos, trimming scenes, adding effects and transitions, overlaying text, and even a little bit of filtering.

-

If you want to dig deeper into MoviePy, we encourage you to try and experiment with this base example by using different effects, transitions, and audio elements to make your trailer truly captivating. -We also encourage you to go and read the The MoviePy User Guide, as well as looking directly at the Api Reference.

-
-
\ No newline at end of file
diff --git a/getting_started/quick_presentation.html b/getting_started/quick_presentation.html
deleted file mode 100644
index 75a5d3c5f..000000000
--- a/getting_started/quick_presentation.html
+++ /dev/null
@@ -1,602 +0,0 @@

Quick presentation#

-

This section explains when MoviePy can be used and how it works.

-
-

Do I need MoviePy?#

-

Here are a few reasons why you may want to edit videos in Python:

-
    -
  • You have many videos to process or to compose in a complicated way.

  • -
  • You want to automate the creation of videos or GIFs on a web server (Django, Flask, etc.)

  • -
  • You want to automate tedious tasks, like title insertions, tracking objects, cutting scenes, making end credits, subtitles, etc…

  • -
  • You want to code your own video effects to do something no existing video editor can.

  • -
  • You want to create animations from images generated by another python library (Matplotlib, Mayavi, Gizeh, scikit-images…)

  • -
-

And here are a few uses for which MoviePy is NOT the best solution:

-
    -
  • You only need to do frame-by-frame video analysis (with face detection or other fancy stuff). This could be done with MoviePy in association with other libraries, but really, just use imageio, OpenCV or SimpleCV, these are libraries that specialize in these tasks.

  • -
  • You only want to convert a video file, or turn a series of image files into a movie. In this case it is better to directly call ffmpeg (or avconv or mencoder…); it will be faster and more memory-efficient than going through MoviePy.

  • -
-
-
-

Advantages and limitations#

-

MoviePy has been developed with the following goals in mind:

-
    -
  • Simple and intuitive. Basic operations can be done in one line. The code is easy to learn and easy to understand for newcomers.

  • -
  • Flexible. You have total control over the frames of the video and audio, and creating your own effects is easy as Py.

  • -
  • Portable. The code uses very common software (Numpy and FFMPEG) and can run on (almost) any machine with (almost) any version of Python.

  • -
-

For the limitations: MoviePy cannot (yet) stream videos (read from a webcam, or render a video live on a distant machine), and is not really designed for video processing involving many successive frames of a movie (like video stabilization; you’ll need other software for that). You can also have memory problems if you use many video, audio, and image sources at the same time (>100), but this will be fixed in future versions.

-
-
-

Example code#

-

In a typical MoviePy script, you load video or audio files, modify them, put them together, and write the final result to a new video file. As an example, let us load a video, lower the volume, add a title in the center of the video for the first ten seconds, and write the result in a file:

-
# Import everything needed to edit video clips
-from moviepy import *
-
-# Load file example.mp4 and extract only the subclip from 00:00:10 to 00:00:20
-clip = VideoFileClip("long_examples/example2.mp4").with_subclip(10, 20)
-
-# Reduce the audio volume to 80% of its original volume
-clip = clip.with_multiply_volume(0.8)
-
-# Generate a text clip. You can customize the font, color, etc.
-txt_clip = TextClip(
-    font="example.ttf", text="Big Buck Bunny", font_size=70, color="white"
-)
-
-# Say that you want it to appear for 10s at the center of the screen
-txt_clip = txt_clip.with_position("center").with_duration(10)
-
-# Overlay the text clip on the first video clip
-video = CompositeVideoClip([clip, txt_clip])
-
-# Write the result to a file (many options available!)
-video.write_videofile("result.mp4")
-
-
-
-
-

How MoviePy works#

-

MoviePy uses the software ffmpeg to read and to export video and audio files. It also (optionally) uses ffplay to allow for video previewing.

-

Internally, the representation and manipulation of the different media is done using Python’s fast numerical library Numpy. Advanced effects and enhancements also use the pillow library.

[Image: ../_images/explanations.jpeg]
-
-

The central concept, the clips#

-

The central objects of MoviePy are the clips, with either AudioClip for any audio element or VideoClip for any visual element. Clips really are the base unit of MoviePy; everything you do is with and on them.

-

Clips can be created from more than just videos or audio, though. They can also be created from an image, a text, a custom animation, a folder of images, and even a simple lambda function!
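A small sketch of clips created from different sources (the file names are illustrative; the TextClip and ImageClip calls follow the examples elsewhere in these docs):

from moviepy import AudioFileClip, ImageClip, TextClip, VideoFileClip

video = VideoFileClip("example.mp4")
audio = AudioFileClip("music.mp3")
image = ImageClip("logo.png").with_duration(5)  # images have no natural duration
text = TextClip(font="font.ttf", text="Hello", font_size=50, color="white").with_duration(5)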

-

To create your final video, what you will do is essentially:

-
1. Load different resources as clips (see Loading resources as clips)
2. Modify them (see Modifying clips and apply effects)
3. Mix them into one final clip (see Compositing multiple clips)
4. Render them into a file (see Previewing and saving video clips)

Of course, MoviePy offers multiple handy solutions and tools to facilitate all those steps, and lets you add new ones by writing your own effects (see Creating your own effects)!

-
-
\ No newline at end of file
diff --git a/getting_started/updating_to_v2.html b/getting_started/updating_to_v2.html
deleted file mode 100644
index 1c1090363..000000000
--- a/getting_started/updating_to_v2.html
+++ /dev/null
@@ -1,624 +0,0 @@

Updating from v1.X to v2.X#

-

MoviePy v2.0 has undergone some large changes with the aim of making the API more consistent and intuitive. In order to do so, multiple breaking changes have been made. Therefore, there is a high likelihood that your pre-v2.0 programs will not run without some changes.

-
-

Dropping support of Python 2#

-

Starting with version 2.0 MoviePy no longer supports Python 2, which makes sense since Python 2 reached its end of life over three years ago. -Focusing on Python 3.7+ allows MoviePy to take advantage of the latest language features and improvements while maintaining code quality and security.

-

Users are encouraged to upgrade to a supported version of Python to continue using MoviePy.

-
-
-

moviepy.editor suppression and simplified importation#

-

Before v2.0, it was advised to import from moviepy.editor whenever you needed to do some sort of manual operations, -such as previewing or hand editing, because the editor package was in charge of a lot of magic and initialization, making your life -easier, at the cost of initializing some complex modules like pygame.

-

With version 2.0, the moviepy.editor namespace simply no longer exists. You simply import everything from moviepy like this:

-
from moviepy import *  # Simple and nice, the __all__ is set in moviepy so only useful things will be loaded
-from moviepy import VideoFileClip  # You can also import only the things you really need
-
-
-
-
-

Renaming and API unification#

-

One of the most significant changes has been renaming all .set_ methods to .with_. More generally, almost all the methods modifying a clip now start with with_, indicating that they work ‘out-of-place’, meaning they do not directly modify the clip, but instead copy it, modify this copy, and return the updated copy, leaving the original clip untouched.
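For example, a typical rename (set_start and set_duration are from the old v1 API):

# MoviePy v1.x
clip = clip.set_start(3).set_duration(5)

# MoviePy v2.x
clip = clip.with_start(3).with_duration(5)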

-

We advise you to check your code for any method calls on Clip objects and look for the matching .with_ equivalent.

-
-
-

Massive refactoring of effects#

-

With version 2.0, effects have undergone massive changes and refactoring. Though the logic of why and when to apply effects remains globally the same, the implementation has changed quite heavily.

-

If you used any kind of effects, you will have to update your code!

-
-

Moving effects from function to classes#

-

MoviePy version 2.0 introduces a more structured and object-oriented approach to handling effects. In previous versions, effects were simply Python functions that manipulated video clips or images. -However, in version 2.0 and onwards, effects are now represented as classes.

-

This shift allows for better organization, encapsulation, and reusability of code, as well as more comprehensible code. Each effect is now encapsulated within its own class, making it easier to manage and modify.

-

All effects now implement the Effect abstract class.

-

If you ever wrote your own effect, you will have to migrate it to the new object implementation. For more info, see Creating your own effects.
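As a rough sketch of the new shape of an effect (the class name and its transformation are illustrative, not taken from the MoviePy codebase; see Creating your own effects for the authoritative pattern):

from dataclasses import dataclass

from moviepy.Clip import Clip
from moviepy.Effect import Effect


@dataclass
class Darken(Effect):
    """Hypothetical effect dimming every frame by a constant factor."""

    factor: float = 0.5

    def apply(self, clip: Clip) -> Clip:
        # image_transform runs our function on every frame of the clip
        return clip.image_transform(
            lambda frame: (frame * self.factor).astype("uint8")
        )


# Applied like any built-in effect:
# clip = clip.with_effects([Darken(0.7)])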

-
-
-

Moving from clip.fx to with_effects()#

-

Moving from function to object also meant MoviePy had to drop the method Clip.fx previously used to apply effects in favor of the new with_effects().
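Side by side (vfx.resize is the old v1 function form; vfx.Resize is the v2 class listed in this document’s module index):

# MoviePy v1.x
clip = clip.fx(vfx.resize, 0.5)

# MoviePy v2.x
clip = clip.with_effects([vfx.Resize(0.5)])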

-

For more info about how to use effects with v2.0, see Modify a clip using effects.

-
-
-

Removing effects as clip methods#

-

Before version 2.0, when importing from moviepy.editor, the effects were added as clip class methods at runtime. This is no longer the case.

-

If you previously used effects by calling them as clip methods, you must now use with_effects().

-
-
-
-

Dropping many external dependencies and unifying environment#

-

With v1.0, MoviePy relied on many optional external dependencies, trying to gracefully fall back from one library to another when one of them was missing, eventually dropping some features when no library was available. This resulted in complex and hard-to-maintain code for the MoviePy team, as well as a fragmented and hard-to-understand environment for the users.

-

With v2.0 the MoviePy team tried to offer a simpler, smaller and more unified dependency list, focusing on pillow for all complex image manipulation, and altogether dropping the usage of ImageMagick, PyGame, OpenCV, scipy, scikit, and a few others.

-
-
-

Removed features#

-

Sadly, reducing the scope of MoviePy and limiting the external libraries means that some features had to be removed. If you used any of the following features, you will have to create your own replacement:

-
    -
  • moviepy.video.tools.tracking

  • -
  • moviepy.video.tools.segmenting

  • -
  • moviepy.video.io.sliders

  • -
-
-
-

Miscellaneous signature changes#

-

When updating the API and moving from the previous libraries to pillow, some miscellaneous changes also happened, meaning some method signatures may have changed.

-

You should check the new signatures if you used any of the following:

-
    -
  • TextClip: some argument names have changed, and a path to a font file is now needed at object instantiation

  • -
  • clip.resize is now clip.resized

  • -
  • clip.crop is now clip.cropped

  • -
  • clip.rotate is now clip.rotated

  • -
  • Any previous Clip method not starting with with_ now probably starts with it

  • -
-
-
-

Why all those changes when updating from v1.0 to v2.0?#

-

You may ask yourself why all those changes were introduced. The answer is: time.

-

MoviePy has seen many evolutions since its first release and has become a rather complex project, with ambitions sometimes too large for the available manpower on the development team. Over time, as in any project, inconsistencies were introduced in order to support new functionalities without breaking the current API, and some initial choices no longer reflected the current state of things.

-

Due to multiple factors, MoviePy has also undergone a long period during which the main version distributed through PyPI diverged from the version distributed on GitHub, introducing confusion and chaos.

-

In a global effort to simplify future development and limit confusion by providing a unified environment, it was decided to release a new major version including the many evolutions that happened over the years, which meant breaking changes.

-

For those interested in how and why all of these things were decided, you can find a lot of the discussion in GitHub issues #1874, #1089 and #2012.

-
-
\ No newline at end of file
diff --git a/index.html b/index.html
deleted file mode 100644
index 042e90089..000000000
--- a/index.html
+++ /dev/null
@@ -1,552 +0,0 @@

MoviePy documentation#

[Image: _images/logo.png]

Date: Nov 22, 2024 Version:

-

Useful links: Binary Installers | Source Repository | Issues & Ideas | Q&A Support

-

MoviePy is the Python reference tool for video editing automation!

-

It’s an open source, MIT-licensed library offering user-friendly video editing -and manipulation tools for the Python programming language.

-
-
-
-
-
-
-Getting started
-

New to MoviePy? Check out the getting started guides. They contain instructions to install MoviePy, as well as introductory concepts and tutorials.

-
- -
-
-
-
-
-
-User guide
-

The user guide provides in-depth information on the -key concepts of MoviePy with useful background information and explanation.

-
- -
-
-
-
-
-
-API reference
-

The reference guide contains a detailed description of -the MoviePy API. The reference describes how the methods work and which parameters can -be used. It assumes that you have an understanding of the key concepts.

-
- -
-
-
-
-
-
-Developer guide
-

Saw a typo in the documentation? Want to improve -existing functionalities? The contributing guidelines will guide -you through the process of improving moviepy.

-
- -
-
-
-
-
-

Contribute!#

-

MoviePy is open-source software originally written by Zulko and released under the MIT licence. It works on Windows, Mac, and Linux.

\ No newline at end of file
diff --git a/objects.inv b/objects.inv
deleted file mode 100644
index ba4346bab..000000000
Binary files a/objects.inv and /dev/null differ
diff --git a/py-modindex.html b/py-modindex.html
deleted file mode 100644
index fb3b87e3d..000000000
--- a/py-modindex.html
+++ /dev/null
@@ -1,815 +0,0 @@

Python Module Index

- m
- moviepy -
    - moviepy.audio -
    - moviepy.audio.AudioClip -
    - moviepy.audio.fx -
    - moviepy.audio.fx.AudioDelay -
    - moviepy.audio.fx.AudioFadeIn -
    - moviepy.audio.fx.AudioFadeOut -
    - moviepy.audio.fx.AudioLoop -
    - moviepy.audio.fx.AudioNormalize -
    - moviepy.audio.fx.MultiplyStereoVolume -
    - moviepy.audio.fx.MultiplyVolume -
    - moviepy.audio.io -
    - moviepy.audio.io.AudioFileClip -
    - moviepy.audio.io.ffmpeg_audiowriter -
    - moviepy.audio.io.ffplay_audiopreviewer -
    - moviepy.audio.io.readers -
    - moviepy.audio.tools -
    - moviepy.audio.tools.cuts -
    - moviepy.Clip -
    - moviepy.config -
    - moviepy.decorators -
    - moviepy.Effect -
    - moviepy.tools -
    - moviepy.video -
    - moviepy.video.compositing -
    - moviepy.video.compositing.CompositeVideoClip -
    - moviepy.video.fx -
    - moviepy.video.fx.AccelDecel -
    - moviepy.video.fx.BlackAndWhite -
    - moviepy.video.fx.Blink -
    - moviepy.video.fx.Crop -
    - moviepy.video.fx.CrossFadeIn -
    - moviepy.video.fx.CrossFadeOut -
    - moviepy.video.fx.EvenSize -
    - moviepy.video.fx.FadeIn -
    - moviepy.video.fx.FadeOut -
    - moviepy.video.fx.Freeze -
    - moviepy.video.fx.FreezeRegion -
    - moviepy.video.fx.GammaCorrection -
    - moviepy.video.fx.HeadBlur -
    - moviepy.video.fx.InvertColors -
    - moviepy.video.fx.Loop -
    - moviepy.video.fx.LumContrast -
    - moviepy.video.fx.MakeLoopable -
    - moviepy.video.fx.Margin -
    - moviepy.video.fx.MaskColor -
    - moviepy.video.fx.MasksAnd -
    - moviepy.video.fx.MasksOr -
    - moviepy.video.fx.MirrorX -
    - moviepy.video.fx.MirrorY -
    - moviepy.video.fx.MultiplyColor -
    - moviepy.video.fx.MultiplySpeed -
    - moviepy.video.fx.Painting -
    - moviepy.video.fx.Resize -
    - moviepy.video.fx.Rotate -
    - moviepy.video.fx.Scroll -
    - moviepy.video.fx.SlideIn -
    - moviepy.video.fx.SlideOut -
    - moviepy.video.fx.SuperSample -
    - moviepy.video.fx.TimeMirror -
    - moviepy.video.fx.TimeSymmetrize -
    - moviepy.video.io -
    - moviepy.video.io.display_in_notebook -
    - moviepy.video.io.ffmpeg_reader -
    - moviepy.video.io.ffmpeg_tools -
    - moviepy.video.io.ffmpeg_writer -
    - moviepy.video.io.ffplay_previewer -
    - moviepy.video.io.gif_writers -
    - moviepy.video.io.ImageSequenceClip -
    - moviepy.video.io.VideoFileClip -
    - moviepy.video.tools -
    - moviepy.video.tools.credits -
    - moviepy.video.tools.cuts -
    - moviepy.video.tools.drawing -
    - moviepy.video.tools.interpolators -
    - moviepy.video.tools.subtitles -
    - moviepy.video.VideoClip -
\ No newline at end of file
diff --git a/reference/index.html b/reference/index.html
deleted file mode 100644
index a28a578f8..000000000
--- a/reference/index.html
+++ /dev/null
@@ -1,648 +0,0 @@

Api Reference#

-

This is the definitive place to find all the details on MoviePy API documentation.

-

For a more beginner-friendly introduction, please see Getting started with MoviePy; for more detailed explanations of the different concepts in MoviePy, see The MoviePy User Guide.

- - - - - - -

moviepy

Imports everything that you need from the MoviePy submodules so that everything can be directly imported with from moviepy import *.

-
\ No newline at end of file
diff --git a/reference/reference/moviepy.Clip.Clip.html b/reference/reference/moviepy.Clip.Clip.html
deleted file mode 100644
index 4a9d01a32..000000000
--- a/reference/reference/moviepy.Clip.Clip.html
+++ /dev/null
@@ -1,1042 +0,0 @@

moviepy.Clip.Clip#

-
-
-class moviepy.Clip.Clip[source]#
-

Base class of all clips (VideoClips and AudioClips).

-
-
-start#
-

When the clip is included in a composition, time of the -composition at which the clip starts playing (in seconds).

-
-
Type:
-

float

-
-
-
- -
-
-end#
-

When the clip is included in a composition, time of the -composition at which the clip stops playing (in seconds).

-
-
Type:
-

float

-
-
-
- -
-
-duration#
-

Duration of the clip (in seconds). Some clips are infinite, in -this case their duration will be None.

-
-
Type:
-

float

-
-
-
- -
-
-close()[source]#
-

Release any resources that are in use.

-
- -
-
-copy()[source]#
-

Allows the usage of .copy() on clips as a chained method invocation.

-
- -
-
-get_frame(t)[source]#
-

Gets a numpy array representing the RGB picture of the clip, -or (mono or stereo) value for a sound clip, at time t.

-
-
Parameters:
-

t (float or tuple or str) – Moment of the clip whose frame will be returned.

-
-
-
- -
-
-is_playing(t)[source]#
-

If t is a time, returns true if t is between the start and the end of the clip. t can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t is a numpy array, returns False if none of the t is in the clip, else returns a vector [b_1, b_2, b_3…] where b_i is true if t_i is in the clip.

-
- -
-
-iter_frames(fps=None, with_times=False, logger=None, dtype=None)[source]#
-

Iterates over all the frames of the clip.

-

Returns each frame of the clip as a HxWxN Numpy array, -where N=1 for mask clips and N=3 for RGB clips.

-

This function is not really meant for video editing. It provides an -easy way to do frame-by-frame treatment of a video, for fields like -science, computer vision…

-
-
Parameters:
-
    -
  • fps (int, optional) – Frames per second for clip iteration. Is optional if the clip already -has a fps attribute.

  • -
  • with_times (bool, optional) – If True, yield tuples of (t, frame) where t is the current time for the frame, otherwise only a frame object.

  • -
  • logger (str, optional) – Either "bar" for progress bar or None or any Proglog logger.

  • -
  • dtype (type, optional) – Type to cast Numpy array frames. Use dtype="uint8" when using the -pictures to write video, images…

  • -
-
-
-

Examples

-
>>> # prints the maximum of red that is contained
->>> # on the first line of each frame of the clip.
->>> from moviepy import VideoFileClip
->>> myclip = VideoFileClip('myvideo.mp4')
->>> print ( [frame[0,:,0].max()
-             for frame in myclip.iter_frames()])
-
-
-
- -
-
-time_transform(time_func, apply_to=None, keep_duration=False)[source]#
-

Returns a Clip instance playing the content of the current clip -but with a modified timeline, time t being replaced by the return -of time_func(t).

-
-
Parameters:
-
    -
  • time_func (function) – A function t -> new_t.

  • -
  • apply_to ({"mask", "audio", ["mask", "audio"]}, optional) – Can be either ‘mask’, or ‘audio’, or [‘mask’,’audio’]. -Specifies if the filter transform should also be applied to the -audio or the mask of the clip, if any.

  • -
  • keep_duration (bool, optional) – False (default) if the transformation modifies the -duration of the clip.

  • -
-
-
-

Examples

-
>>> # plays the clip (and its mask and sound) twice faster
->>> new_clip = clip.time_transform(lambda t: 2*t, apply_to=['mask', 'audio'])
->>>
->>> # plays the clip starting at t=3, and backwards:
->>> new_clip = clip.time_transform(lambda t: 3-t)
-
-
-
- -
-
-transform(func, apply_to=None, keep_duration=True)[source]#
-

General processing of a clip.

-

Returns a new Clip whose frames are a transformation -(through function func) of the frames of the current clip.

-
-
Parameters:
-
    -
  • func (function) – A function with signature (gf,t -> frame) where gf will -represent the current clip’s get_frame method, -i.e. gf is a function (t->image). Parameter t is a time -in seconds, frame is a picture (=Numpy array) which will be -returned by the transformed clip (see examples below).

  • -
  • apply_to ({"mask", "audio", ["mask", "audio"]}, optional) – Can be either 'mask', or 'audio', or -['mask','audio']. -Specifies if the filter should also be applied to the -audio or the mask of the clip, if any.

  • -
  • keep_duration (bool, optional) – Set to True if the transformation does not change the -duration of the clip.

  • -
-
-
-

Examples

-

In the following new_clip a 100 pixels-high clip whose video -content scrolls from the top to the bottom of the frames of -clip at 50 pixels per second.

-
>>> scroll = lambda get_frame, t: get_frame(t)[int(t):int(t)+50, :]
>>> new_clip = clip.transform(scroll, apply_to='mask')
-
-
-
- -
-
-with_cutout(start_time, end_time)[source]#
-

Returns a clip playing the content of the current clip but skipping the extract between start_time and end_time, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

-

If the original clip has a duration attribute set, the duration of the returned clip is automatically computed as duration - (end_time - start_time).

-

The resulting clip’s audio and mask will also be cut out if they exist.

-
-
Parameters:
-
    -
  • start_time (float or tuple or str) – Moment from which frames will be ignored in the resulting output.

  • -
  • end_time (float or tuple or str) – Moment until which frames will be ignored in the resulting output.

  • -
-
-
-
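For instance, a quick sketch (the file name is illustrative):

>>> from moviepy import VideoFileClip
>>> clip = VideoFileClip("myvideo.mp4")
>>> # keep everything except the extract between t=10s and t=15s
>>> shorter = clip.with_cutout(10, 15)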
- -
-
-with_duration(duration, change_end=True)[source]#
-

Returns a copy of the clip, with the duration attribute set to duration, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. Also sets the duration of the mask and audio, if any, of the returned clip.

-

If change_end is False, the start attribute of the clip will be modified according to the new duration and the preset end of the clip.

-
-
Parameters:
-
    -
  • duration (float) – New duration attribute value for the clip.

  • -
  • change_end (bool, optional) – If True, the end attribute value of the clip will be adjusted to match the new duration, using clip.start + duration.

  • -
-
-
-
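For instance, a quick sketch (times are coerced like everywhere else in the API):

>>> ten_seconds = clip.with_duration(10)
>>> same_thing = clip.with_duration((0, 10))  # a (min, sec) tuple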
- -
-
-with_effects(effects: List[Effect])[source]#
-

Return a copy of the current clip with the effects applied.

-
>>> new_clip = clip.with_effects([vfx.Resize(0.2, method="bilinear")])
-
-
-

You can also pass multiple effects as a list:

-
>>> clip.with_effects([afx.MultiplyVolume(0.5), vfx.Resize(0.3), vfx.MirrorX()])
-
-
-
- -
-
-with_end(t)[source]#
-

Returns a copy of the clip, with the end attribute set to t, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. Also sets the duration of the mask and audio, if any, of the returned clip.

-
-
Parameters:
-

t (float or tuple or str) – New end attribute value for the clip.

-
-
-
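For instance, a quick sketch:

>>> trimmed = clip.with_end(8.5)  # the clip now ends at t=8.5 and its duration is updated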
- -
-
-with_fps(fps, change_duration=False)[source]#
-

Returns a copy of the clip with a new default fps for functions like write_videofile, iter_frames, etc.

-
-
Parameters:
-
    -
  • fps (int) – New fps attribute value for the clip.

  • -
  • change_duration (bool, optional) – If change_duration=True, then the video speed will change to match the new fps (conserving all frames 1:1). For example, if the fps is halved in this mode, the duration will be doubled.

  • -
-
-
-
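A quick sketch of the two modes (assuming the clip already has an fps attribute):

>>> resampled = clip.with_fps(60)  # same duration, frames sampled at 60 fps
>>> slowed = clip.with_fps(clip.fps / 2, change_duration=True)  # frames kept 1:1, duration doubled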
- -
-
-with_is_mask(is_mask)[source]#
-

Sets whether the clip is a mask or not.

-
-
Parameters:
-

is_mask (bool) – New is_mask attribute value for the clip.

-
-
-
- -
-
-with_make_frame(make_frame)[source]#
-

Sets a make_frame attribute for the clip. Useful for setting arbitrary/complicated videoclips.

-
-
Parameters:
-

make_frame (function) – New frame creator function for the clip.

-
-
-
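A minimal sketch (the frame generator below, which returns a solid red 64x64 RGB image for every time t, is purely illustrative):

>>> import numpy as np
>>> red = lambda t: np.full((64, 64, 3), (255, 0, 0), dtype=np.uint8)
>>> clip = clip.with_make_frame(red)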
- -
-
-with_memoize(memoize)[source]#
-

Sets whether the clip should keep the last frame read in memory.

-
-
Parameters:
-

memoize (bool) – Indicates if the clip should keep the last frame read in memory.

-
-
-
- -
-
-with_multiply_speed(factor: float = None, final_duration: float = None)[source]#
-

Returns a clip playing the current clip but at a speed multiplied by factor. For info on the parameters, please see vfx.MultiplySpeed.

-
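For instance (give only one of the two parameters):

>>> doubled = clip.with_multiply_speed(2)  # plays twice as fast, half the duration
>>> fitted = clip.with_multiply_speed(final_duration=10)  # speed chosen so the clip lasts 10 s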
- -
-
-with_multiply_volume(factor: float, start_time=None, end_time=None)[source]#
-

Returns a new clip with audio volume multiplied by the value factor. For info on the parameters, please see afx.MultiplyVolume.

-
- -
-
-with_start(t, change_end=True)[source]#
-

Returns a copy of the clip, with the start attribute set to t, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

-

These changes are also applied to the audio and mask clips of the current clip, if they exist.

-
-
Parameters:
-
    -
  • t (float or tuple or str) – New start attribute value for the clip.

  • -
  • change_end (bool, optional) – Indicates if the end attribute value must be changed accordingly, if possible. If change_end=True and the clip has a duration attribute, the end attribute of the clip will be updated to start + duration. If change_end=False and the clip has an end attribute, the duration attribute of the clip will be updated to end - start.

  • -
-
-
-
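For instance, a quick sketch, e.g. when placing the clip in a composition:

>>> delayed = clip.with_start(5)  # starts playing at t=5; end is shifted to 5 + duration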
- -
-
-with_subclip(start_time=0, end_time=None)[source]#
-

Returns a clip playing the content of the current clip between times start_time and end_time, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

-

The mask and audio of the resulting subclip will be subclips of the mask and audio of the original clip, if they exist.

-

It is equivalent to slicing the clip as a sequence, like clip[t_start:t_end].

-
-
Parameters:
-
    -
  • start_time (float or tuple or str, optional) – Moment that will be chosen as the beginning of the produced clip. If it is negative, it is reset to clip.duration + start_time.

  • -
  • end_time (float or tuple or str, optional) –

    Moment that will be chosen as the end of the produced clip. If not provided, it is assumed to be the duration of the clip (potentially infinite). If it is negative, it is reset to clip.duration + end_time. For instance:

    -
    >>> # cut the last two seconds of the clip:
    >>> new_clip = clip.with_subclip(0, -2)
    -
    -
    -

    If end_time is provided or if the clip has a duration attribute, the duration of the returned clip is set automatically.

    -

  • -
-
-
-
- -
- -

moviepy.Clip#

-

Implements the central object of MoviePy, the Clip, and all the methods that are common to the two subclasses of Clip, VideoClip and AudioClip.

-

Classes

Clip() — Base class of all clips (VideoClips and AudioClips).

-
- - -

moviepy.Effect#

-

Defines the base class for all effects in MoviePy.

-
-
-class moviepy.Effect.Effect[source]#
-

Base abstract class for all effects in MoviePy. Any new effect has to extend this base class.

-
-
-abstract apply(clip: Clip) Clip[source]#
-

Apply the current effect on a clip.

-
-
Parameters:
-

clip – The target clip to apply the effect on. (Internally, MoviePy will always pass a copy of the original clip.)

-
-
-
- -
-
-copy()[source]#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying, because some of them will modify their own attributes when applied. For example, an effect may set a previously unset property by using a property of the target clip.

If we were to reuse the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

-
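A quick sketch of the intended pattern (assuming two audio clips clip_a and clip_b):

>>> fade = afx.AudioFadeIn(2)
>>> faded_a = fade.copy().apply(clip_a)
>>> faded_b = fade.copy().apply(clip_b)  # a fresh copy, so no state leaks between uses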
- -
- -

moviepy.audio.AudioClip.AudioArrayClip#

-
-
-class moviepy.audio.AudioClip.AudioArrayClip(array, fps)[source]#
-

An audio clip made from a sound array.

-
-
Parameters:
-
    -
  • array – A Numpy array representing the sound, of size Nx1 for mono, Nx2 for stereo.

  • fps – Frames per second: speed at which the sound is supposed to be played.

  • -
-
-
-
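A minimal sketch (a one-second 440 Hz mono tone built with numpy):

>>> import numpy as np
>>> from moviepy.audio.AudioClip import AudioArrayClip
>>> t = np.linspace(0, 1, 44100, endpoint=False)
>>> mono = np.sin(2 * np.pi * 440 * t).reshape(-1, 1)  # Nx1 for mono
>>> clip = AudioArrayClip(mono, fps=44100)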
- -

moviepy.audio.AudioClip.AudioClip#

-
-
-class moviepy.audio.AudioClip.AudioClip(make_frame=None, duration=None, fps=None)[source]#
-

Base class for audio clips.

-

See AudioFileClip and CompositeAudioClip for usable classes.

-

An AudioClip is a Clip with a make_frame attribute of the form t -> [f_t] for mono sound and t -> [f1_t, f2_t] for stereo sound (the arrays are Numpy arrays). The f_t are floats between -1 and 1. These bounds can be exceeded without problems (the program will put the sound back into the bounds at conversion time, without much impact).

-
-
Parameters:
-
    -
  • make_frame – A function t -> frame at time t. The frame does not mean much for a sound, it is just a float. What ‘makes’ the sound are the variations of that float over time.

  • duration – Duration of the clip (in seconds). Some clips are infinite, in which case their duration will be None.

  • nchannels – Number of channels (one or two for mono or stereo).

  • -
-
-
-

Examples

-
>>> # Plays the note A in mono (a sine wave of frequency 440 Hz)
>>> import numpy as np
>>> make_frame = lambda t: np.sin(440 * 2 * np.pi * t)
>>> clip = AudioClip(make_frame, duration=5, fps=44100)
>>> clip.audiopreview()
-
-
-
>>> # Plays the note A in stereo (two sine waves of frequencies 440 and 880 Hz)
>>> make_frame = lambda t: np.array([
...     np.sin(440 * 2 * np.pi * t),
...     np.sin(880 * 2 * np.pi * t)
... ]).T.copy(order="C")
>>> clip = AudioClip(make_frame, duration=3, fps=44100)
>>> clip.audiopreview()
-
-
-
-
-audiopreview(fps=None, buffersize=2000, nbytes=2, audio_flag=None, video_flag=None)[source]#
-

Preview an AudioClip using ffplay.

-
-
Parameters:
-
    -
  • fps – Frame rate of the sound. 44100 gives top quality, but may cause problems if your computer is not fast enough and your clip is complicated. If the sound jumps during the preview, lower it (11025 is still fine, 5000 is tolerable).

  • buffersize – The sound is not generated all at once, but rather by chunks of frames. buffersize is the size of such a chunk. Try varying it if you encounter audio problems (but you shouldn’t have to).

  • nbytes – Number of bytes to encode the sound: 1 for 8-bit sound, 2 for 16-bit, 4 for 32-bit sound. 2 bytes is fine.

  • audio_flag – threading.Event instance used to synchronize video and audio during VideoClip.preview().

  • video_flag – threading.Event instance used to synchronize video and audio during VideoClip.preview().

  • -
-
-
-
- -
-
-display_in_notebook(filetype=None, maxduration=60, t=None, fps=None, rd_kwargs=None, center=True, **html_kwargs)#
-

Displays clip content in a Jupyter Notebook.

-

Remarks: If your browser doesn’t support HTML5, this should warn you. If nothing is displayed, maybe your file or filename is wrong. Important: The media will be physically embedded in the notebook.

-
-
Parameters:
-
    -
  • clip (moviepy.Clip.Clip) – Either the name of a file, or a clip to preview. The clip will actually be written to a file and embedded as if a filename was provided.

  • filetype (str, optional) – One of "video", "image" or "audio". If None is given, it is determined based on the extension of filename, but this can fail.

  • maxduration (float, optional) – An error will be raised if the clip’s duration is more than the indicated value (in seconds), to avoid spoiling the browser’s cache and the RAM.

  • t (float, optional) – If not None, only the frame at time t will be displayed in the notebook, instead of a video of the clip.

  • fps (int, optional) – Allows specifying an fps, as required for clips whose fps is unknown.

  • rd_kwargs (dict, optional) – Keyword arguments for the rendering, like dict(fps=15, bitrate="50k"). Allows you to give some options to the render process. You can, for example, disable the logger bar by passing dict(logger=None).

  • center (bool, optional) – If True (default), the content will be wrapped in a <div align=middle> HTML container, so the content will be displayed at the center.

  • kwargs – Allows you to give some options, like width=260, etc. When editing looping gifs, a good choice is loop=1, autoplay=1.

  • -
-
-
-

Examples

-
>>> from moviepy import *
>>> # later ...
>>> clip.display_in_notebook(width=360)
>>> clip.audio.display_in_notebook()
-
-
-
>>> clip.write_gif("test.gif")
>>> display_in_notebook('test.gif')
-
-
-
>>> clip.save_frame("first_frame.jpeg")
>>> display_in_notebook("first_frame.jpeg")
-
-
-
- -
-
-iter_chunks(chunksize=None, chunk_duration=None, fps=None, quantize=False, nbytes=2, logger=None)[source]#
-

Iterator that yields the whole sound array of the clip in chunks.

-
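For instance, a sketch of scanning the whole sound without loading it in memory at once:

>>> peak = max(abs(chunk).max() for chunk in clip.iter_chunks(chunk_duration=1, fps=44100))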
- -
-
-max_volume(stereo=False, chunksize=50000, logger=None)[source]#
-

Returns the maximum volume level of the clip.

-
- -
-
-to_soundarray(tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000)[source]#
-

Transforms the sound into an array that can be played by pygame or written in a wav file. See AudioClip.preview.

-
-
Parameters:
-
    -
  • fps – Frame rate of the sound for the conversion. 44100 for top quality.

  • nbytes – Number of bytes to encode the sound: 1 for 8-bit sound, 2 for 16-bit, 4 for 32-bit sound.

  • -
-
-
-
- -
-
-write_audiofile(filename, fps=None, nbytes=2, buffersize=2000, codec=None, bitrate=None, ffmpeg_params=None, write_logfile=False, logger='bar')[source]#
-

Writes an audio file from the AudioClip.

-
-
Parameters:
-
    -
  • filename – Name of the output file, as a string or a path-like object.

  • fps – Frames per second. If not set, it will default to self.fps if already set, otherwise it will default to 44100.

  • nbytes – Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound).

  • buffersize – The sound is not generated all at once, but rather by chunks of frames. buffersize is the size of such a chunk. Try varying it if you encounter audio problems (but you shouldn’t have to). Defaults to 2000.

  • codec – Which audio codec should be used. If None provided, the codec is determined based on the extension of the filename. Choose ‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav.

  • bitrate – Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. Will determine the size and quality of the output file. Note that it is mainly an indicative goal; the bitrate won’t necessarily be exactly this value in the output file.

  • ffmpeg_params – Any additional parameters you would like to pass, as a list of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’].

  • write_logfile – If true, produces a detailed logfile named filename + ‘.log’ when writing the file.

  • logger – Either "bar" for a progress bar, None, or any Proglog logger.

  • -
-
-
-
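For instance, a quick sketch (file names are illustrative):

>>> clip = AudioFileClip("music.ogg")
>>> clip.write_audiofile("music.wav", codec="pcm_s16le")  # 16-bit wav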
- -
- -

moviepy.audio.AudioClip.CompositeAudioClip#

-
-
-class moviepy.audio.AudioClip.CompositeAudioClip(clips)[source]#
-

Clip made by composing several AudioClips.

-

An audio clip made by putting together several audio clips.

-
-
Parameters:
-

clips – List of audio clips, which may start playing at different times or together, depending on their start attributes. If all clips have their duration attribute set, the duration of the composite clip is computed automatically.

-
-
-
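A minimal sketch (two audio files mixed together, the second one starting at t=5):

>>> from moviepy import AudioFileClip, CompositeAudioClip
>>> voice = AudioFileClip("voice.ogg")
>>> music = AudioFileClip("music.ogg").with_start(5)
>>> mixed = CompositeAudioClip([voice, music])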
-
-property ends#
-

Returns ending times for all clips in the composition.

-
- -
-
-make_frame(t)[source]#
-

Renders a frame of the composition at time t.

-
- -
-
-property starts#
-

Returns starting times for all clips in the composition.

-
- -
- -
- - -
- - - - - -
- - - - - - -
-
- -
- -
-
-
- - - - - -
- - -
- - \ No newline at end of file diff --git a/reference/reference/moviepy.audio.AudioClip.concatenate_audioclips.html b/reference/reference/moviepy.audio.AudioClip.concatenate_audioclips.html deleted file mode 100644 index e5ab0e186..000000000 --- a/reference/reference/moviepy.audio.AudioClip.concatenate_audioclips.html +++ /dev/null @@ -1,671 +0,0 @@ - - - - - - - - - - - - moviepy.audio.AudioClip.concatenate_audioclips — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
-
-
- - -
-
-

MoviePy v2.0 have introduced breaking changes, - see Updating from v1.X to v2.X for more info.

-
-
- - - - -
-
- -
- - - - - - - - - - - - - -
- -
- - -
-
- -
-
- -
- -
- - - - - - -
- -
- - -
-
- - - - - -
- -
-

moviepy.audio.AudioClip.concatenate_audioclips#

-
-
-moviepy.audio.AudioClip.concatenate_audioclips(clips)[source]#
-

Concatenates AudioClips one after another, in the order in which they are passed to the clips parameter.

-
-
Parameters:
-

clips – List of audio clips, which will be played one after the other.

-
-
-
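For instance, a quick sketch (file names are illustrative):

>>> from moviepy import AudioFileClip, concatenate_audioclips
>>> intro = AudioFileClip("intro.ogg")
>>> song = AudioFileClip("song.ogg")
>>> full = concatenate_audioclips([intro, song])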
- -

moviepy.audio.AudioClip#

-

Implements AudioClip (base class for audio clips) and its main subclasses:

-
    -
  • Audio clips: AudioClip, AudioFileClip, AudioArrayClip

  • -
  • Composition: CompositeAudioClip

  • -
-

Classes

AudioArrayClip(array, fps) — An audio clip made from a sound array.

AudioClip([make_frame, duration, fps]) — Base class for audio clips.

CompositeAudioClip(clips) — Clip made by composing several AudioClips.

-

Functions

concatenate_audioclips(clips) — Concatenates AudioClips one after another, in the order in which they are passed to the clips parameter.

-

moviepy.audio.fx.AudioDelay#

-
-
-class moviepy.audio.fx.AudioDelay.AudioDelay(offset: float = 0.2, n_repeats: int = 8, decay: float = 1)[source]#
-

Repeats the audio a certain number of times at constant intervals, multiplying the volume levels using a linear space in the range from 1 to the decay argument value.

-
-
Parameters:
-
    -
  • offset (float, optional) – Gap between repetition start times, in seconds.

  • n_repeats (int, optional) – Number of repetitions (not including the clip itself).

  • decay (float, optional) – Multiplication factor for the volume level of the last repetition. Each repetition will have a volume in the linear range between 1 and this value, increasing or decreasing constantly. Keep in mind that the last repetition will be muted if this is 0, and if it is greater than 1, the volume will increase for each repetition.

  • -
-
-
-

Examples

-
>>> from moviepy import *
>>> audioclip = AudioFileClip('myaudio.wav').with_effects([
...     afx.AudioDelay(offset=.2, n_repeats=10, decay=.2)
... ])
-
-
-
>>> # stereo A note
>>> make_frame = lambda t: np.array(
...     [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)]
... ).T
>>> clip = AudioClip(make_frame=make_frame, duration=0.1, fps=44100)
>>> clip = clip.with_effects([afx.AudioDelay(offset=.2, n_repeats=11, decay=0)])
-
-
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

(For details on why effects must be copied before applying, see Effect.copy.)

-
- -
- -

moviepy.audio.fx.AudioFadeIn#

-
-
-class moviepy.audio.fx.AudioFadeIn.AudioFadeIn(duration: float)[source]#
-

Return an audio (or video) clip that is first mute, then the sound arrives progressively over duration seconds.

-
-
Parameters:
-

duration (float) – How long it takes for the sound to reach its normal level.

-
-
-

Examples

-
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.with_effects([afx.AudioFadeIn("00:00:06")])
-
-
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

(For details on why effects must be copied before applying, see Effect.copy.)

-
- -
- -

moviepy.audio.fx.AudioFadeOut#

-
-
-class moviepy.audio.fx.AudioFadeOut.AudioFadeOut(duration: float)[source]#
-

Return a sound clip where the sound fades out progressively over duration seconds at the end of the clip.

-
-
Parameters:
-

duration (float) – How long it takes for the sound to reach zero level at the end of the clip.

-
-
-

Examples

-
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.with_effects([afx.AudioFadeOut("00:00:06")])
-
-
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

(For details on why effects must be copied before applying, see Effect.copy.)

-
- -
- -

moviepy.audio.fx.AudioLoop#

-
-
-class moviepy.audio.fx.AudioLoop.AudioLoop(n_loops: int = None, duration: float = None)[source]#
-

Loops over an audio clip.

-

Returns an audio clip that plays the given clip either n_loops times, or during duration seconds.

-

Examples

-
>>> from moviepy import *
>>> videoclip = VideoFileClip('myvideo.mp4')
>>> music = AudioFileClip('music.ogg')
>>> audio = music.with_effects([afx.AudioLoop(duration=videoclip.duration)])
>>> videoclip.with_audio(audio)
-
-
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

(For details on why effects must be copied before applying, see Effect.copy.)

-
- -
- -

moviepy.audio.fx.AudioNormalize#

-
-
-class moviepy.audio.fx.AudioNormalize.AudioNormalize[source]#
-

Return a clip whose volume is normalized to 0 dB.

Return an audio (or video) clip whose audio volume is normalized so that the maximum volume is at 0 dB, the maximum achievable volume.

-

Examples

-
>>> from moviepy import *
>>> videoclip = VideoFileClip('myvideo.mp4').with_effects([afx.AudioNormalize()])
-
-
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

(For details on why effects must be copied before applying, see Effect.copy.)

-
- -
- -

moviepy.audio.fx.MultiplyStereoVolume#

-
-
-class moviepy.audio.fx.MultiplyStereoVolume.MultiplyStereoVolume(left: float = 1, right: float = 1)[source]#
-

For a stereo audioclip, this effect lets you change the volume of the left and right channels separately (with the factors left and right). Makes a stereo audio clip in which the volume of left and right is controllable.

-

Examples

-
>>> from moviepy import AudioFileClip
>>> music = AudioFileClip('music.ogg')
>>> # mutes the left channel
>>> audio_r = music.with_effects([afx.MultiplyStereoVolume(left=0, right=1)])
>>> # halves the audio volume
>>> audio_h = music.with_effects([afx.MultiplyStereoVolume(left=0.5, right=0.5)])
-
-
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

(For details on why effects must be copied before applying, see Effect.copy.)

-
- -
- -

moviepy.audio.fx.MultiplyVolume#

-
-
-class moviepy.audio.fx.MultiplyVolume.MultiplyVolume(factor: float, start_time: float = None, end_time: float = None)[source]#
-

Returns a clip with audio volume multiplied by the value factor. Can be applied to both audio and video clips.

-
-
Parameters:
-
    -
  • factor (float) – Volume multiplication factor.

  • start_time (float, optional) – Time from the beginning of the clip at which the volume transformation begins to take effect, in seconds. By default, at the beginning.

  • end_time (float, optional) – Time from the beginning of the clip at which the volume transformation stops taking effect, in seconds. By default, at the end.

  • -
-
-
-

Examples

-
>>> from moviepy import AudioFileClip
>>>
>>> music = AudioFileClip("music.ogg")
>>> # doubles the audio volume
>>> doubled_audio_clip = music.with_effects([afx.MultiplyVolume(2)])
>>> # halves the audio volume
>>> half_audio_clip = music.with_effects([afx.MultiplyVolume(0.5)])
>>> # silences the clip for one second, between t=2s and t=3s
>>> effect = afx.MultiplyVolume(0, start_time=2, end_time=3)
>>> silenced_clip = music.with_effects([effect])
-
-
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

(For details on why effects must be copied before applying, see Effect.copy.)

-
- -
- -

moviepy.audio.fx#

-

All the audio effects that can be applied to AudioClip and VideoClip.

-

Modules

moviepy.audio.fx.AudioDelay([offset, ...]) — Repeats the audio a certain number of times at constant intervals, multiplying the volume levels using a linear space in the range from 1 to the decay argument value.

moviepy.audio.fx.AudioFadeIn(duration) — Return an audio (or video) clip that is first mute, then the sound arrives progressively over duration seconds.

moviepy.audio.fx.AudioFadeOut(duration) — Return a sound clip where the sound fades out progressively over duration seconds at the end of the clip.

moviepy.audio.fx.AudioLoop([n_loops, duration]) — Loops over an audio clip.

moviepy.audio.fx.AudioNormalize() — Return a clip whose volume is normalized to 0 dB.

moviepy.audio.fx.MultiplyStereoVolume([...]) — For a stereo audioclip, change the volume of the left and right channels separately (with the factors left and right).

moviepy.audio.fx.MultiplyVolume(factor[, ...]) — Returns a clip with audio volume multiplied by the value factor.

-

moviepy.audio#

-

Everything about audio manipulation.

-

Modules

moviepy.audio.AudioClip — Implements AudioClip (base class for audio clips) and its main subclasses.

moviepy.audio.fx — All the audio effects that can be applied to AudioClip and VideoClip.

moviepy.audio.io — Classes and methods to read, write, and preview audio files.

moviepy.audio.tools — Tools for better processing and editing of audio.

-

moviepy.audio.io.AudioFileClip.AudioFileClip#

-
-
-class moviepy.audio.io.AudioFileClip.AudioFileClip(filename, decode_file=False, buffersize=200000, nbytes=2, fps=44100)[source]#
-

An audio clip read from a sound file, or an array. The whole file is not loaded in memory. Instead, only a portion is read and stored in memory. This portion includes frames before and after the last frames read, so that it is fast to read the sound backward and forward.

-
-
Parameters:
-
    -
  • filename – Either a soundfile name (of any extension supported by ffmpeg) as a string or a path-like object, or an array representing a sound. If the soundfile is not a .wav, it will be converted to .wav first, using the fps and bitrate arguments.

  • buffersize – Size to load in memory (in number of frames).

  • -
-
-
-
-
-nbytes#
-

Number of bytes per frame of the original audio file.

-
- -
-
-fps#
-

Number of frames per second in the audio file

-
- -
-
-buffersize#
-

See Parameters.

-
- -
-
Lifetime

Note that this creates subprocesses and locks files. If you construct one of these instances, you must call close() afterwards, or the subresources will not be cleaned up until the process ends.

Examples

-
>>> snd = AudioFileClip("song.wav")
>>> snd.close()
-
-
-
-
-close()[source]#
-

Close the internal reader.

-
- -

moviepy.audio.io.AudioFileClip#

-

Implements AudioFileClip, a class for creating audio clips from audio files.

-

Classes

AudioFileClip(filename[, decode_file, ...]) — An audio clip read from a sound file, or an array.

-

moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter#

-
-
-class moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter(filename, fps_input, nbytes=2, nchannels=2, codec='libfdk_aac', bitrate=None, input_video=None, logfile=None, ffmpeg_params=None)[source]#
-

A class to write an AudioClip into an audio file.

-
-
Parameters:
-
    -
  • filename – Name of any video or audio file, like video.mp4 or sound.wav etc.

  • size – Size (width, height) in pixels of the output video.

  • fps_input – Frames per second of the input audio (given by the AudioClip being written down).

  • codec – Name of the ffmpeg codec to use for the output.

  • bitrate – A string indicating the bitrate of the final video. Only relevant for codecs which accept a bitrate.

  • -
-
-
-
-
-close()[source]#
-

Closes the writer, terminating the subprocess if it is still alive.

-
- -
-
-write_frames(frames_array)[source]#
-

Send the audio frames (a chunk of the AudioClip) to ffmpeg for writing.

-
- -
- -

moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite#

-
-
-moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite(clip, filename, fps, nbytes, buffersize, codec='libvorbis', bitrate=None, write_logfile=False, ffmpeg_params=None, logger='bar')[source]#
-

A function that wraps the FFMPEG_AudioWriter to write an AudioClip to a file.

-
- -

moviepy.audio.io.ffmpeg_audiowriter#

-

MoviePy audio writing with ffmpeg.

-

Classes

FFMPEG_AudioWriter(filename, fps_input[, ...]) — A class to write an AudioClip into an audio file.

-

Functions

ffmpeg_audiowrite(clip, filename, fps, ...) — A function that wraps the FFMPEG_AudioWriter to write an AudioClip to a file.

-

moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer#

-
-
-class moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer(fps_input, nbytes=2, nchannels=2)[source]#
-

A class to preview an AudioClip.

-
-
Parameters:
-
    -
  • fps_input – Frames per second of the input audio (given by the AudioClip being written down).

  • nbytes – Number of bytes to encode the sound: 1 for 8-bit sound, 2 for 16-bit, 4 for 32-bit sound. The default of 2 bytes is fine.

  • nchannels – Number of audio channels in the clip. Defaults to 2 channels.

  • -
-
-
-
-
-close()[source]#
-

Closes the writer, terminating the subprocess if it is still alive.

-
- -
-
-write_frames(frames_array)[source]#
-

Send a raw audio frame (a chunk of audio) to ffplay to be played.

-
- -
- -

moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview#

-
-
-moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview(clip, fps=None, buffersize=2000, nbytes=2, audio_flag=None, video_flag=None)[source]#
-

A function that wraps the FFPLAY_AudioPreviewer to preview an AudioClip.

-
-
Parameters:
-
    -
  • fps – Frame rate of the sound. 44100 gives top quality, but may cause problems if your computer is not fast enough and your clip is complicated. If the sound jumps during the preview, lower it (11025 is still fine, 5000 is tolerable).

  • buffersize – The sound is not generated all at once, but rather by chunks of frames. buffersize is the size of such a chunk. Try varying it if you encounter audio problems (but you shouldn’t have to).

  • nbytes – Number of bytes to encode the sound: 1 for 8-bit sound, 2 for 16-bit, 4 for 32-bit sound. 2 bytes is fine.

  • audio_flag – threading.Event instance used to synchronize video and audio during VideoClip.preview().

  • video_flag – threading.Event instance used to synchronize video and audio during VideoClip.preview().

  • -
-
-
-
- -

moviepy.audio.io.ffplay_audiopreviewer#

-

MoviePy audio previewing with ffplay.

-

Classes

FFPLAY_AudioPreviewer(fps_input[, nbytes, ...]) — A class to preview an AudioClip.

-

Functions

ffplay_audiopreview(clip[, fps, buffersize, ...]) — A function that wraps the FFPLAY_AudioPreviewer to preview an AudioClip.

-

moviepy.audio.io.readers.FFMPEG_AudioReader#

-
-
-class moviepy.audio.io.readers.FFMPEG_AudioReader(filename, buffersize, decode_file=False, print_infos=False, fps=44100, nbytes=2, nchannels=2)[source]#
-

A class to read the audio in either video files or audio files using ffmpeg. ffmpeg will read any audio and transform it into raw data.

-
-
Parameters:
-
    -
  • filename – Name of any video or audio file, like video.mp4 or sound.wav etc.

  • buffersize – The size of the buffer to use. Should be bigger than the buffer used by write_audiofile.

  • print_infos – Print the ffmpeg infos on the file being read (for debugging).

  • fps – Desired frames per second in the decoded signal that will be received from ffmpeg.

  • nbytes – Desired number of bytes (1, 2, 4) in the signal that will be received from ffmpeg.

  • -
-
-
-
-
-buffer_around(frame_number)[source]#
-

Fill the buffer with frames, centered on frame_number if possible.

-
- -
-
-close()[source]#
-

Closes the reader, terminating the subprocess if it is still alive.

-
- -
-
-get_frame(tt)[source]#
-

Retrieve the audio frame(s) corresponding to the given timestamp(s).

-
-
Parameters:
-

tt (float or numpy.ndarray) – The timestamp(s) at which to retrieve the audio frame(s). If tt is a single float value, the frame corresponding to that timestamp is returned. If tt is a NumPy array of timestamps, an array of frames corresponding to each timestamp is returned.

-
-
-
- -
-
-initialize(start_time=0)[source]#
-

Opens the file, creates the pipe.

-
- -
-
-read_chunk(chunksize)[source]#
-

Read a chunk of audio data from the audio stream.

-

This method reads a chunk of audio data from the audio stream. The specified number of frames, given by chunksize, is read from the proc stdout. The audio data is returned as a NumPy array, where each row corresponds to a frame and each column corresponds to a channel. If there is not enough audio left to read, the remaining portion is padded with zeros, ensuring that the returned array has the desired length. The pos attribute is updated accordingly.

-
-
Parameters:
-

chunksize (float) – The desired number of audio frames to read.

-
-
-
- -
-
-seek(pos)[source]#
-

Read a frame at time t. Note for coders: getting an arbitrary frame in the video with ffmpeg can be painfully slow if some decoding has to be done. This function tries to avoid fetching arbitrary frames whenever possible, by moving between adjacent frames.

-
- -
-
-skip_chunk(chunksize)[source]#
-

Skip a chunk of audio data by reading and discarding the specified number of frames from the audio stream. The audio stream is read from the proc stdout. After skipping the chunk, the pos attribute is updated accordingly.

-
-
Parameters:
-

chunksize (int) – The number of audio frames to skip.

-
-
-
- -
- -
- - -
- - - - - -
- - - - - - -
-
- -
- -
-
-
- - - - - -
- - -
- - \ No newline at end of file diff --git a/reference/reference/moviepy.audio.io.readers.html b/reference/reference/moviepy.audio.io.readers.html deleted file mode 100644 index e9dc3384b..000000000 --- a/reference/reference/moviepy.audio.io.readers.html +++ /dev/null @@ -1,658 +0,0 @@ - - - - - - - - - - - - moviepy.audio.io.readers — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.audio.io.readers#

-

MoviePy audio reading with ffmpeg.

-

Classes

- - - - - - -

FFMPEG_AudioReader(filename, buffersize[, ...])

A class to read the audio in either video files or audio files using ffmpeg.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.audio.tools.cuts.find_audio_period.html b/reference/reference/moviepy.audio.tools.cuts.find_audio_period.html deleted file mode 100644 index 1fc3fe8b4..000000000 --- a/reference/reference/moviepy.audio.tools.cuts.find_audio_period.html +++ /dev/null @@ -1,674 +0,0 @@ - - - - - - - - - - - - moviepy.audio.tools.cuts.find_audio_period — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.audio.tools.cuts.find_audio_period#

-
-
-moviepy.audio.tools.cuts.find_audio_period(clip, min_time=0.1, max_time=2, time_resolution=0.01)[source]#
-

Finds the period, in seconds, of an audioclip.

-
-
Parameters:
-
    -
  • min_time (float, optional) – Minimum bound for the returned value.

  • -
  • max_time (float, optional) – Maximum bound for the returned value.

  • -
  • time_resolution (float, optional) – Numerical precision.

  • -
-
-
-
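A short usage sketch (the file name is hypothetical), assuming a roughly periodic soundtrack:

>>> from moviepy import AudioFileClip
>>> from moviepy.audio.tools.cuts import find_audio_period
>>> clip = AudioFileClip("loop.wav")
>>> period = find_audio_period(clip, min_time=0.1, max_time=2)
>>> print(f"period = {period:.3f} s")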
- - \ No newline at end of file diff --git a/reference/reference/moviepy.audio.tools.cuts.html b/reference/reference/moviepy.audio.tools.cuts.html deleted file mode 100644 index 542c34771..000000000 --- a/reference/reference/moviepy.audio.tools.cuts.html +++ /dev/null @@ -1,658 +0,0 @@ - - - - - - - - - - - - moviepy.audio.tools.cuts — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.audio.tools.cuts#

-

Cutting utilities working with audio.

-

Functions

- - - - - - -

find_audio_period(clip[, min_time, ...])

Finds the period, in seconds, of an audioclip.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.audio.tools.html b/reference/reference/moviepy.audio.tools.html deleted file mode 100644 index 3ac5e9187..000000000 --- a/reference/reference/moviepy.audio.tools.html +++ /dev/null @@ -1,658 +0,0 @@ - - - - - - - - - - - - moviepy.audio.tools — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.audio.tools#

-

Tools for better processing and editing of audio.

-

Modules

- - - - - - -

moviepy.audio.tools.cuts

Cutting utilities working with audio.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.config.check.html b/reference/reference/moviepy.config.check.html deleted file mode 100644 index a1e8816b6..000000000 --- a/reference/reference/moviepy.config.check.html +++ /dev/null @@ -1,665 +0,0 @@ - - - - - - - - - - - - moviepy.config.check — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.config.check#

-
-
-moviepy.config.check()[source]#
-

Check if moviepy has found the binaries for FFmpeg.

-
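A minimal sketch of calling it:

>>> from moviepy.config import check
>>> check()  # reports whether the FFmpeg binary was found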
- - \ No newline at end of file diff --git a/reference/reference/moviepy.config.html b/reference/reference/moviepy.config.html deleted file mode 100644 index 34006d5fa..000000000 --- a/reference/reference/moviepy.config.html +++ /dev/null @@ -1,656 +0,0 @@ - - - - - - - - - - - - moviepy.config — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.config#

-

Third party programs configuration for MoviePy.

-

Functions

- - - - - - - - - -

check()

Check if moviepy has found the binaries for FFmpeg.

try_cmd(cmd)

Verify that the OS supports command invocation as expected by moviepy.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.config.try_cmd.html b/reference/reference/moviepy.config.try_cmd.html deleted file mode 100644 index 6ada9d387..000000000 --- a/reference/reference/moviepy.config.try_cmd.html +++ /dev/null @@ -1,665 +0,0 @@ - - - - - - - - - - - - moviepy.config.try_cmd — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.config.try_cmd#

-
-
-moviepy.config.try_cmd(cmd)[source]#
-

Verify that the OS supports command invocation as expected by moviepy.

-
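A minimal sketch, assuming the function returns a (success, error) pair as in the current implementation:

>>> from moviepy.config import try_cmd
>>> success, err = try_cmd(["ffmpeg", "-version"])
>>> success
True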
- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.add_mask_if_none.html b/reference/reference/moviepy.decorators.add_mask_if_none.html deleted file mode 100644 index 69621705e..000000000 --- a/reference/reference/moviepy.decorators.add_mask_if_none.html +++ /dev/null @@ -1,491 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.add_mask_if_none — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.add_mask_if_none#

-
-
-moviepy.decorators.add_mask_if_none(func, clip)#
-

Add a mask to the clip if there is none.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.apply_to_audio.html b/reference/reference/moviepy.decorators.apply_to_audio.html deleted file mode 100644 index 87cc3725b..000000000 --- a/reference/reference/moviepy.decorators.apply_to_audio.html +++ /dev/null @@ -1,491 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.apply_to_audio — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.apply_to_audio#

-
-
-moviepy.decorators.apply_to_audio(func, clip)#
-

Applies the function func to the audio of the clip created with func.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.apply_to_mask.html b/reference/reference/moviepy.decorators.apply_to_mask.html deleted file mode 100644 index 3534b8b18..000000000 --- a/reference/reference/moviepy.decorators.apply_to_mask.html +++ /dev/null @@ -1,492 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.apply_to_mask — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.apply_to_mask#

-
-
-moviepy.decorators.apply_to_mask(func, clip)#
-

Applies the same function func to the mask of the clip created with -func.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.audio_video_effect.html b/reference/reference/moviepy.decorators.audio_video_effect.html deleted file mode 100644 index 785bdcf86..000000000 --- a/reference/reference/moviepy.decorators.audio_video_effect.html +++ /dev/null @@ -1,494 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.audio_video_effect — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.audio_video_effect#

-
-
-moviepy.decorators.audio_video_effect(func, effect, clip)#
-

Use an audio function on a video/audio clip.

-

This decorator indicates that the function func (audioclip -> audioclip) can also be used on a video clip, in which case it returns a video clip with unmodified video and modified audio.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.convert_masks_to_RGB.html b/reference/reference/moviepy.decorators.convert_masks_to_RGB.html deleted file mode 100644 index 299f0ccf8..000000000 --- a/reference/reference/moviepy.decorators.convert_masks_to_RGB.html +++ /dev/null @@ -1,491 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.convert_masks_to_RGB — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.convert_masks_to_RGB#

-
-
-moviepy.decorators.convert_masks_to_RGB(func, clip)#
-

If the clip is a mask, convert it to RGB before running the function.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.convert_parameter_to_seconds.html b/reference/reference/moviepy.decorators.convert_parameter_to_seconds.html deleted file mode 100644 index 63933ea38..000000000 --- a/reference/reference/moviepy.decorators.convert_parameter_to_seconds.html +++ /dev/null @@ -1,665 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.convert_parameter_to_seconds — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.convert_parameter_to_seconds#

-
-
-moviepy.decorators.convert_parameter_to_seconds(varnames)[source]#
-

Converts the specified variables to seconds.

-
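A sketch of how this decorator might be applied (the trim function is hypothetical):

>>> from moviepy.decorators import convert_parameter_to_seconds
>>> @convert_parameter_to_seconds(["start_time"])
>>> def trim(clip, start_time):
>>>     return start_time  # already converted to seconds
>>>
>>> trim(None, "00:01:30")
90.0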
- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.convert_path_to_string.html b/reference/reference/moviepy.decorators.convert_path_to_string.html deleted file mode 100644 index 3c1ddff9c..000000000 --- a/reference/reference/moviepy.decorators.convert_path_to_string.html +++ /dev/null @@ -1,665 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.convert_path_to_string — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.convert_path_to_string#

-
-
-moviepy.decorators.convert_path_to_string(varnames)[source]#
-

Converts the specified variables to a path string.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.html b/reference/reference/moviepy.decorators.html deleted file mode 100644 index c8b99021c..000000000 --- a/reference/reference/moviepy.decorators.html +++ /dev/null @@ -1,659 +0,0 @@ - - - - - - - - - - - - moviepy.decorators — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators#

-

Decorators used by moviepy.

-

Functions

- - - - - - - - - - - - -

convert_parameter_to_seconds(varnames)

Converts the specified variables to seconds.

convert_path_to_string(varnames)

Converts the specified variables to a path string.

preprocess_args(fun, varnames)

Applies fun to variables in varnames before launching the function.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.outplace.html b/reference/reference/moviepy.decorators.outplace.html deleted file mode 100644 index 7ef2693c3..000000000 --- a/reference/reference/moviepy.decorators.outplace.html +++ /dev/null @@ -1,491 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.outplace — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.outplace#

-
-
-moviepy.decorators.outplace(func, clip)#
-

Applies func(clip.copy(), *args, **kwargs) and returns the modified copy, leaving the original clip untouched.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.preprocess_args.html b/reference/reference/moviepy.decorators.preprocess_args.html deleted file mode 100644 index 7b2eef5f4..000000000 --- a/reference/reference/moviepy.decorators.preprocess_args.html +++ /dev/null @@ -1,665 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.preprocess_args — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.preprocess_args#

-
-
-moviepy.decorators.preprocess_args(fun, varnames)[source]#
-

Applies fun to variables in varnames before launching the function.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.requires_duration.html b/reference/reference/moviepy.decorators.requires_duration.html deleted file mode 100644 index ee11903d7..000000000 --- a/reference/reference/moviepy.decorators.requires_duration.html +++ /dev/null @@ -1,491 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.requires_duration — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.requires_duration#

-
-
-moviepy.decorators.requires_duration(func, clip)#
-

Raises an error if the clip has no duration.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.requires_fps.html b/reference/reference/moviepy.decorators.requires_fps.html deleted file mode 100644 index 5b4ec90b0..000000000 --- a/reference/reference/moviepy.decorators.requires_fps.html +++ /dev/null @@ -1,491 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.requires_fps — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.requires_fps#

-
-
-moviepy.decorators.requires_fps(func, clip)#
-

Raises an error if the clip has no fps.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.decorators.use_clip_fps_by_default.html b/reference/reference/moviepy.decorators.use_clip_fps_by_default.html deleted file mode 100644 index 8d521ad28..000000000 --- a/reference/reference/moviepy.decorators.use_clip_fps_by_default.html +++ /dev/null @@ -1,491 +0,0 @@ - - - - - - - - - - - - moviepy.decorators.use_clip_fps_by_default — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.decorators.use_clip_fps_by_default#

-
-
-moviepy.decorators.use_clip_fps_by_default(func, clip)#
-

Will use clip.fps if no fps=... is provided in kwargs.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.html b/reference/reference/moviepy.html deleted file mode 100644 index 0ad9f1e11..000000000 --- a/reference/reference/moviepy.html +++ /dev/null @@ -1,669 +0,0 @@ - - - - - - - - - - - - moviepy — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy#

-

Imports everything that you need from the MoviePy submodules so that everything can be directly imported with from moviepy import *.

-
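For instance, the star import makes the main classes directly available (the media path is borrowed from the examples elsewhere on these pages):

>>> from moviepy import *
>>> clip = VideoFileClip("media/chaplin.mp4")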

Modules

- - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.Clip

Implements the central object of MoviePy, the Clip, and all the methods that are common to the two subclasses of Clip, VideoClip and AudioClip.

moviepy.Effect()

Base abstract class for all effects in MoviePy.

moviepy.audio

Everything about audio manipulation.

moviepy.config

Third party programs configuration for MoviePy.

moviepy.decorators

Decorators used by moviepy.

moviepy.tools

Misc.

moviepy.video

Everything about video manipulation.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.tools.close_all_clips.html b/reference/reference/moviepy.tools.close_all_clips.html deleted file mode 100644 index b770be44f..000000000 --- a/reference/reference/moviepy.tools.close_all_clips.html +++ /dev/null @@ -1,683 +0,0 @@ - - - - - - - - - - - - moviepy.tools.close_all_clips — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.tools.close_all_clips#

-
-
-moviepy.tools.close_all_clips(objects='globals', types=('audio', 'video', 'image'))[source]#
-

Closes all clips in a context.

-

Follows different strategies for retrieving the namespace from which the clips to close are taken, depending on the objects argument, and filters by type of clip depending on the types argument.

-
-
Parameters:
-
    -
  • objects (str or dict, optional) –

      -
    • If it is a string and the value is "globals", all the clips contained in the globals() namespace will be closed.

    • If it is a dictionary, its values may be clips to close, which is useful if you want to pass locals().

  • types (Iterable, optional) – Set of types of clips to close; the supported values are "audio", "video" and "image".

  • -
-
-
-
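A sketch of closing everything opened in the local scope (the media path is borrowed from the examples elsewhere on these pages):

>>> from moviepy import VideoFileClip
>>> from moviepy.tools import close_all_clips
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> close_all_clips(objects=locals())  # releases the reader held by clip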
- - \ No newline at end of file diff --git a/reference/reference/moviepy.tools.convert_to_seconds.html b/reference/reference/moviepy.tools.convert_to_seconds.html deleted file mode 100644 index 8fd3e114e..000000000 --- a/reference/reference/moviepy.tools.convert_to_seconds.html +++ /dev/null @@ -1,684 +0,0 @@ - - - - - - - - - - - - moviepy.tools.convert_to_seconds — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.tools.convert_to_seconds#

-
-
-moviepy.tools.convert_to_seconds(time)[source]#
-

Will convert any time into seconds.

-

If the type of time is not valid, -it’s returned as is.

-

Here are the accepted formats:

-
>>> convert_to_seconds(15.4)   # seconds
15.4
>>> convert_to_seconds((1, 21.5))   # (min, sec)
81.5
>>> convert_to_seconds((1, 1, 2))   # (hr, min, sec)
3662
>>> convert_to_seconds('01:01:33.045')
3693.045
>>> convert_to_seconds('01:01:33,5')    # comma works too
3693.5
>>> convert_to_seconds('1:33,5')    # only minutes and secs
99.5
>>> convert_to_seconds('33.5')      # only secs
33.5
-
-
-
- - \ No newline at end of file diff --git a/reference/reference/moviepy.tools.cross_platform_popen_params.html b/reference/reference/moviepy.tools.cross_platform_popen_params.html deleted file mode 100644 index 90864f615..000000000 --- a/reference/reference/moviepy.tools.cross_platform_popen_params.html +++ /dev/null @@ -1,671 +0,0 @@ - - - - - - - - - - - - moviepy.tools.cross_platform_popen_params — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.tools.cross_platform_popen_params#

-
-
-moviepy.tools.cross_platform_popen_params(popen_params)[source]#
-

Wrap a dictionary of subprocess.Popen kwargs with this function and it will be ready to work without unexpected behaviours on any platform. Currently, the implementation will add to them:

-
    -
  • creationflags=0x08000000: no extra unwanted window opens on Windows when the child process is created. Only added on Windows.

  • -
-
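A sketch of wrapping Popen kwargs before spawning ffmpeg:

>>> import subprocess as sp
>>> from moviepy.tools import cross_platform_popen_params
>>> params = cross_platform_popen_params({"stdout": sp.PIPE, "stderr": sp.PIPE, "stdin": sp.DEVNULL})
>>> proc = sp.Popen(["ffmpeg", "-version"], **params)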
- - \ No newline at end of file diff --git a/reference/reference/moviepy.tools.deprecated_version_of.html b/reference/reference/moviepy.tools.deprecated_version_of.html deleted file mode 100644 index 3db0213c5..000000000 --- a/reference/reference/moviepy.tools.deprecated_version_of.html +++ /dev/null @@ -1,686 +0,0 @@ - - - - - - - - - - - - moviepy.tools.deprecated_version_of — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.tools.deprecated_version_of#

-
-
-moviepy.tools.deprecated_version_of(func, old_name)[source]#
-

Indicates that a function is deprecated and has a new name.

-

func is the new function and old_name is the name of the deprecated -function.

-
-
Returns:
-

A function that does the same thing as func, but with a docstring -and a printed message on call which say that the function is -deprecated and that you should use func instead.

-
-
Return type:
-

deprecated_func

-
-
-

Examples

-
>>> # The badly named method 'to_file' is replaced by 'write_file'
>>> class Clip:
>>>     def write_file(self, *args, **kwargs):
>>>         ...  # actual writing code goes here
>>>
>>> Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file')
-
-
-
- - \ No newline at end of file diff --git a/reference/reference/moviepy.tools.find_extension.html b/reference/reference/moviepy.tools.find_extension.html deleted file mode 100644 index 82b3e1004..000000000 --- a/reference/reference/moviepy.tools.find_extension.html +++ /dev/null @@ -1,670 +0,0 @@ - - - - - - - - - - - - moviepy.tools.find_extension — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.tools.find_extension#

-
-
-moviepy.tools.find_extension(codec)[source]#
-

Returns the corresponding file extension for a codec.

-
-
Parameters:
-

codec (str) – Video or audio codec name.

-
-
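For instance (the expected output assumes the default codec/extension table):

>>> from moviepy.tools import find_extension
>>> find_extension("libx264")
'mp4'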
- - \ No newline at end of file diff --git a/reference/reference/moviepy.tools.html b/reference/reference/moviepy.tools.html deleted file mode 100644 index 74de9173c..000000000 --- a/reference/reference/moviepy.tools.html +++ /dev/null @@ -1,671 +0,0 @@ - - - - - - - - - - - - moviepy.tools — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.tools#

-

Misc. useful functions that can be used at many places in the program.

-

Functions

- - - - - - - - - - - - - - - - - - - - - - - - -

close_all_clips([objects, types])

Closes all clips in a context.

convert_to_seconds(time)

Will convert any time into seconds.

cross_platform_popen_params(popen_params)

Wrap a dictionary of subprocess.Popen kwargs with this function and it will be ready to work without unexpected behaviours on any platform.

deprecated_version_of(func, old_name)

Indicates that a function is deprecated and has a new name.

find_extension(codec)

Returns the corresponding file extension for a codec.

no_display_available()

Return True if we determine the host system has no graphical environment.

subprocess_call(cmd[, logger])

Executes the given subprocess command.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.tools.no_display_available.html b/reference/reference/moviepy.tools.no_display_available.html deleted file mode 100644 index 2c55ad0dc..000000000 --- a/reference/reference/moviepy.tools.no_display_available.html +++ /dev/null @@ -1,671 +0,0 @@ - - - - - - - - - - - - moviepy.tools.no_display_available — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.tools.no_display_available#

-
-
-moviepy.tools.no_display_available() bool[source]#
-

Return True if we determine the host system has no graphical environment. This is useful to skip tests requiring a display, like preview.

-
-
Note: Currently this only works for Linux/BSD systems with X11 or Wayland. It probably works for SunOS, AIX and CYGWIN.

- - \ No newline at end of file diff --git a/reference/reference/moviepy.tools.subprocess_call.html b/reference/reference/moviepy.tools.subprocess_call.html deleted file mode 100644 index ba7257b8c..000000000 --- a/reference/reference/moviepy.tools.subprocess_call.html +++ /dev/null @@ -1,666 +0,0 @@ - - - - - - - - - - - - moviepy.tools.subprocess_call — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.tools.subprocess_call#

-
-
-moviepy.tools.subprocess_call(cmd, logger='bar')[source]#
-

Executes the given subprocess command.

-

Set logger to None or to a custom Proglog logger to avoid printed output.

-
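A sketch with hypothetical file names:

>>> from moviepy.tools import subprocess_call
>>> subprocess_call(["ffmpeg", "-i", "input.mp4", "output.avi"], logger=None)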
- - \ No newline at end of file diff --git a/reference/reference/moviepy.video.VideoClip.BitmapClip.html b/reference/reference/moviepy.video.VideoClip.BitmapClip.html deleted file mode 100644 index 76c8ed343..000000000 --- a/reference/reference/moviepy.video.VideoClip.BitmapClip.html +++ /dev/null @@ -1,676 +0,0 @@ - - - - - - - - - - - - moviepy.video.VideoClip.BitmapClip — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.video.VideoClip.BitmapClip#

-
-
-class moviepy.video.VideoClip.BitmapClip(bitmap_frames, *, fps=None, duration=None, color_dict=None, is_mask=False)[source]#
-

Clip made of color bitmaps. Mainly designed for testing purposes.

-
-
-to_bitmap(color_dict=None)[source]#
-

Returns a valid bitmap list that represents each frame of the clip. -If color_dict is not specified, then it will use the same color_dict -that was used to create the clip.

-
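A sketch of a two-frame clip, assuming "R" and "G" map to red and green in the default color dictionary:

>>> from moviepy.video.VideoClip import BitmapClip
>>> red_frame = ["RR", "RR"]     # a 2x2 red image
>>> green_frame = ["GG", "GG"]   # a 2x2 green image
>>> clip = BitmapClip([red_frame, green_frame], fps=1)
>>> clip.to_bitmap() == [red_frame, green_frame]
True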
- - \ No newline at end of file diff --git a/reference/reference/moviepy.video.VideoClip.ColorClip.html b/reference/reference/moviepy.video.VideoClip.ColorClip.html deleted file mode 100644 index d82b110c0..000000000 --- a/reference/reference/moviepy.video.VideoClip.ColorClip.html +++ /dev/null @@ -1,676 +0,0 @@ - - - - - - - - - - - - moviepy.video.VideoClip.ColorClip — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.video.VideoClip.ColorClip#

-
-
-class moviepy.video.VideoClip.ColorClip(size, color=None, is_mask=False, duration=None)[source]#
-

An ImageClip showing just one color.

-
-
Parameters:
-
    -
  • size – Size (width, height) in pixels of the clip.

  • -
  • color – If argument is_mask is False, color indicates the color in RGB of the clip (default is black). If is_mask is True, color must be a float between 0 and 1 (default is 1).

  • -
  • is_mask – Set to true if the clip will be used as a mask.

  • -
-
-
-
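A minimal sketch:

>>> from moviepy.video.VideoClip import ColorClip
>>> background = ColorClip(size=(640, 360), color=(255, 0, 0), duration=2)  # 2 s of solid red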
- - \ No newline at end of file diff --git a/reference/reference/moviepy.video.VideoClip.DataVideoClip.html b/reference/reference/moviepy.video.VideoClip.DataVideoClip.html deleted file mode 100644 index 294454b64..000000000 --- a/reference/reference/moviepy.video.VideoClip.DataVideoClip.html +++ /dev/null @@ -1,675 +0,0 @@ - - - - - - - - - - - - moviepy.video.VideoClip.DataVideoClip — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.video.VideoClip.DataVideoClip#

-
-
-class moviepy.video.VideoClip.DataVideoClip(data, data_to_frame, fps, is_mask=False, has_constant_size=True)[source]#
-

Class of video clips whose successive frames are functions of successive datasets.

-
-
Parameters:
-
    -
  • data – A list of datasets, each dataset being used for one frame of the clip

  • -
  • data_to_frame – A function d -> video frame, where d is one element of the list data

  • -
  • fps – Number of frames per second in the animation

  • -
-
-
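A sketch where each dataset is a grey level rendered as a constant frame:

>>> import numpy as np
>>> from moviepy.video.VideoClip import DataVideoClip
>>> levels = [0, 64, 128, 192, 255]  # one dataset per frame
>>> def to_frame(level):
>>>     return np.full((32, 32, 3), level, dtype=np.uint8)  # constant grey frame
>>> clip = DataVideoClip(levels, to_frame, fps=5)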
- - \ No newline at end of file diff --git a/reference/reference/moviepy.video.VideoClip.ImageClip.html b/reference/reference/moviepy.video.VideoClip.ImageClip.html deleted file mode 100644 index 4ed9ee03f..000000000 --- a/reference/reference/moviepy.video.VideoClip.ImageClip.html +++ /dev/null @@ -1,722 +0,0 @@ - - - - - - - - - - - - moviepy.video.VideoClip.ImageClip — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.video.VideoClip.ImageClip#

-
-
-class moviepy.video.VideoClip.ImageClip(img, is_mask=False, transparent=True, fromalpha=False, duration=None)[source]#
-

Class for non-moving VideoClips.

-

A video clip originating from a picture. This clip will simply -display the given picture at all times.

-

Examples

-
>>> clip = ImageClip("myHouse.jpeg")
->>> clip = ImageClip( someArray ) # a Numpy array represent
-
-
-
-
Parameters:
-
    -
  • img – Any picture file (png, tiff, jpeg, etc.) as a string or a path-like object, -or any array representing an RGB image (for instance a frame from a VideoClip).

  • -
  • is_mask – Set this parameter to True if the clip is a mask.

  • -
  • transparent – Set this parameter to True (default) if you want the alpha layer -of the picture (if it exists) to be used as a mask.

  • -
-
-
-
-
-img#
-

Array representing the image of the clip.

-
- -
-
-image_transform(image_func, apply_to=None)[source]#
-

Image-transformation filter.

-

Does the same as VideoClip.image_transform, but for ImageClip the -transformed clip is computed once and for all at the beginning, -and not for each ‘frame’.

-
- -
-
-time_transform(time_func, apply_to=None, keep_duration=False)[source]#
-

Time-transformation filter.

-

Applies a transformation to the clip’s timeline -(see Clip.time_transform).

-

This method does nothing for ImageClips (but it may affect their -masks or their audios). The result is still an ImageClip.

-
- -
-
-transform(func, apply_to=None, keep_duration=True)[source]#
-

General transformation filter.

-

Equivalent to VideoClip.transform. The result is no longer an ImageClip; it has the class VideoClip (since it may be animated).

- - \ No newline at end of file diff --git a/reference/reference/moviepy.video.VideoClip.TextClip.html b/reference/reference/moviepy.video.VideoClip.TextClip.html deleted file mode 100644 index 08b439a22..000000000 --- a/reference/reference/moviepy.video.VideoClip.TextClip.html +++ /dev/null @@ -1,724 +0,0 @@ - - - - - - - - - - - - moviepy.video.VideoClip.TextClip — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.video.VideoClip.TextClip#

-
-
-class moviepy.video.VideoClip.TextClip(font, text=None, filename=None, font_size=None, size=(None, None), margin=(None, None), color='black', bg_color=None, stroke_color=None, stroke_width=0, method='label', text_align='left', horizontal_align='center', vertical_align='center', interline=4, transparent=True, duration=None)[source]#
-

Class for autogenerated text clips.

-

Creates an ImageClip originating from a script-generated text image.

-
-
Parameters:
-
    -
  • font – Path to the font to use. Must be an OpenType font. -See TextClip.list('font') for the list of fonts you can use on -your computer.

  • -
  • text – A string of the text to write. Can be replaced by argument -filename.

  • -
  • filename – The name of a file in which there is the text to write, -as a string or a path-like object. -Can be provided instead of argument text

  • -
  • font_size – Font size in point. Can be auto-set if method=’caption’, -or if method=’label’ and size is set.

  • -
  • size – Size of the picture in pixels. Can be auto-set if -method=’label’ and font_size is set, but mandatory if method=’caption’. -the height can be None for caption if font_size is defined, -it will then be auto-determined.

  • -
  • margin – Margin to be added around the text, as a tuple of two (symmetrical) or four (asymmetrical) values: either (horizontal, vertical) or (left, top, right, bottom). By default no margin (None, None). This is especially useful for the auto-computed size, to give the text some extra room.

  • -
  • bg_color – Color of the background. Default to None for no background. Can be -a RGB (or RGBA if transparent = True) tuple, a color name, or an -hexadecimal notation.

  • -
  • color – Color of the text. Default to “black”. Can be -a RGB (or RGBA if transparent = True) tuple, a color name, or an -hexadecimal notation.

  • -
-
-
-
-
stroke_color

Color of the stroke (=contour line) of the text. If None, -there will be no stroke.

-
-
stroke_width

Width of the stroke, in pixels. Can be a float, like 1.5.

-
-
method

Either ‘label’ (default, the picture will be autosized so as to fit -exactly the size) or ‘caption’ (the text will be drawn in a picture -with fixed size provided with the size argument). If caption, -the text will be wrapped automagically.

-
-
text_align

center | left | right. Text align similar to css. Default to left.

-
-
horizontal_align

center | left | right. Define horizontal align of text bloc in image. -Default to center.

-
-
vertical_align

center | top | bottom. Define vertical align of text bloc in image. -Default to center.

-
-
interline

Interline spacing. Default to 4.

-
-
transparent

True (default) if you want to take into account the -transparency in the image.

-
-
duration

Duration of the clip

-
-
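A sketch of a typical construction (the font path is hypothetical; it must point to an OpenType font on your machine):

>>> from moviepy.video.VideoClip import TextClip
>>> txt = TextClip(font="./example.ttf", text="Hello!", font_size=70,
>>>                color="white", bg_color="black", duration=3)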
- - \ No newline at end of file diff --git a/reference/reference/moviepy.video.VideoClip.UpdatedVideoClip.html b/reference/reference/moviepy.video.VideoClip.UpdatedVideoClip.html deleted file mode 100644 index 3d5c9721b..000000000 --- a/reference/reference/moviepy.video.VideoClip.UpdatedVideoClip.html +++ /dev/null @@ -1,688 +0,0 @@ - - - - - - - - - - - - moviepy.video.VideoClip.UpdatedVideoClip — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.video.VideoClip.UpdatedVideoClip#

-
-
-class moviepy.video.VideoClip.UpdatedVideoClip(world, is_mask=False, duration=None)[source]#
-

Class of clips whose make_frame requires some objects to be updated. Particularly practical in science where some algorithm needs to make some steps before a new frame can be generated.

-

UpdatedVideoClips have the following make_frame:

-
>>> def make_frame(t):
>>>     while self.world.clip_t < t:
>>>         self.world.update()  # updates, and increases world.clip_t
>>>     return self.world.to_frame()
-
-
-
-
Parameters:
-
    -
  • world – An object with the following attributes: world.clip_t, the clip’s time corresponding to the world’s state; world.update(), which updates the world’s state (including increasing world.clip_t by one time step); and world.to_frame(), which renders a frame depending on the world’s state.

  • -
  • is_mask – True if the clip is a WxH mask with values in 0-1

  • -
  • duration – Duration of the clip, in seconds

  • -
- - \ No newline at end of file diff --git a/reference/reference/moviepy.video.VideoClip.VideoClip.html b/reference/reference/moviepy.video.VideoClip.VideoClip.html deleted file mode 100644 index 395ee06d7..000000000 --- a/reference/reference/moviepy.video.VideoClip.VideoClip.html +++ /dev/null @@ -1,1276 +0,0 @@ - - - - - - - - - - - - moviepy.video.VideoClip.VideoClip — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

moviepy.video.VideoClip.VideoClip#

-
-
-class moviepy.video.VideoClip.VideoClip(make_frame=None, is_mask=False, duration=None, has_constant_size=True)[source]#
-

Base class for video clips.

-

See VideoFileClip, ImageClip etc. for more user-friendly classes.

-
-
Parameters:
-
    -
  • is_maskTrue if the clip is going to be used as a mask.

  • -
  • duration – Duration of the clip in seconds. If None, we get a clip of infinite duration.

  • -
  • has_constant_size – Define if clip size is constant or if it may vary with time. Default -to True

  • -
-
-
-
-
-size#
-

The size of the clip, (width,height), in pixels.

-
- -
-
-w, h
-

The width and height of the clip, in pixels.

-
- -
-
-is_mask#
-

Boolean set to True if the clip is a mask.

-
- -
-
-make_frame#
-

A function t-> frame at time t where frame is a -w*h*3 RGB array.

-
- -
-
-mask(default None)#
-
-
VideoClip mask attached to this clip. If mask is None, the video clip is fully opaque.

-
-
-
- -
-
-audio(default None)#
-

An AudioClip instance containing the audio of the video clip.

-
- -
-
-pos#
-

A function t->(x,y) where x,y is the position -of the clip when it is composed with other clips. -See VideoClip.set_pos for more details

-
- -
-
-relative_pos#
-

See variable pos.

-
- -
-
-layer#
-

Indicates which clip is rendered on top when two clips overlap in -a CompositeVideoClip. The highest number is rendered on top. -Default is 0.

-
- -
-
-property aspect_ratio#
-

Returns the aspect ratio of the video.

-
- -
-
-blit_on(picture, t)[source]#
-

Returns the result of the blit of the clip’s frame at time t -on the given picture, the position of the clip being given -by the clip’s pos attribute. Meant for compositing.

-
- -
-
-copy()#
-

Mixed copy of the clip.

-

Returns a shallow copy of the clip whose mask and audio will -be shallow copies of the clip’s mask and audio if they exist.

-

This method is intensively used to produce new clips every time -there is an outplace transformation of the clip (clip.resize, -clip.with_subclip, etc.)

-

Acts like a deepcopy, except that readers and other possibly unpicklable objects are not copied.

-
- -
-
-cropped(x1: int = None, y1: int = None, x2: int = None, y2: int = None, width: int = None, height: int = None, x_center: int = None, y_center: int = None)[source]#
-

Returns a new clip in which just a rectangular subregion of the original clip is conserved. x1,y1 indicates the top left corner and x2,y2 is the lower right corner of the cropped region. All coordinates are in pixels. Float numbers are accepted. For info on the parameters, please see vfx.Crop.

-
- -
-
-display_in_notebook(filetype=None, maxduration=60, t=None, fps=None, rd_kwargs=None, center=True, **html_kwargs)#
-

Displays clip content in a Jupyter Notebook.

-

Remarks: If your browser doesn’t support HTML5, this should warn you. -If nothing is displayed, maybe your file or filename is wrong. -Important: The media will be physically embedded in the notebook.

-
-
Parameters:
-
    -
  • clip (moviepy.Clip.Clip) – Either the name of a file, or a clip to preview. The clip will actually -be written to a file and embedded as if a filename was provided.

  • -
  • filetype (str, optional) – One of "video", "image" or "audio". If None is given, it is determined based on the extension of filename, but this detection can fail.

  • -
  • maxduration (float, optional) – An error will be raised if the clip’s duration is more than the indicated -value (in seconds), to avoid spoiling the browser’s cache and the RAM.

  • -
  • t (float, optional) – If not None, only the frame at time t will be displayed in the notebook, -instead of a video of the clip.

  • -
  • fps (int, optional) – Enables to specify an fps, as required for clips whose fps is unknown.

  • -
  • rd_kwargs (dict, optional) – Keyword arguments for the rendering, like dict(fps=15, bitrate="50k"). -Allow you to give some options to the render process. You can, for -example, disable the logger bar passing dict(logger=None).

  • -
  • center (bool, optional) – If true (default), the content will be wrapped in a -<div align=middle> HTML container, so the content will be displayed -at the center.

  • -
  • kwargs – Allow you to give some options, like width=260, etc. When editing -looping gifs, a good choice is loop=1, autoplay=1.

  • -
-
-
-

Examples

-
>>> from moviepy import *
>>> # later ...
>>> clip.display_in_notebook(width=360)
>>> clip.audio.display_in_notebook()

>>> clip.write_gif("test.gif")
>>> display_in_notebook('test.gif')

>>> clip.save_frame("first_frame.jpeg")
>>> display_in_notebook("first_frame.jpeg")
-
-
-
- -
-
-fill_array(pre_array, shape=(0, 0))[source]#
-

Fills an array to match the specified shape.

-

If the pre_array is smaller than the desired shape, the missing rows -or columns are added with ones to the bottom or right, respectively, -until the shape matches. If the pre_array is larger than the desired -shape, the excess rows or columns are cropped from the bottom or right, -respectively, until the shape matches.

-

The resulting array with the filled shape is returned.

-
-
Parameters:
-
    -
  • pre_array (numpy.ndarray) – The original array to be filled.

  • shape (tuple) – The desired shape of the resulting array.

  • -
-
-
-
- -
-
-property h#
-

Returns the height of the video.

-
- -
-
-image_transform(image_func, apply_to=None)[source]#
-

Modifies the images of a clip by replacing the frame get_frame(t) by -another frame, image_func(get_frame(t)).

-
- -
-
-property n_frames#
-

Returns the number of frames of the video.

-
- -
-
-preview(fps=15, audio=True, audio_fps=22050, audio_buffersize=3000, audio_nbytes=2)[source]#
-

Displays the clip in a window, at the given frames per second.

-

It will prevent the clip from playing faster than normal, but it cannot prevent the clip from playing slower than normal if the computations are complex. In this case, try reducing the fps.

-
-
Parameters:

  • fps (int, optional) – Number of frames per second in the displayed video. Default to 15.

  • audio (bool, optional) – True (default) if you want the clip's audio to be played during the preview.

  • audio_fps (int, optional) – The frames per second to use when generating the audio sound.

  • audio_buffersize (int, optional) – The size of the buffer used when generating the audio sound.

  • audio_nbytes (int, optional) – The number of bytes used when generating the audio sound.
-
-
-

Examples

-
>>> from moviepy import *
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.preview(fps=10, audio=False)
-
-
-
- -
-
-resized(new_size=None, height=None, width=None, apply_to_mask=True)[source]#
-

Returns a video clip that is a resized version of the clip. -For info on the parameters, please see vfx.Resize

-
- -
-
-rotated(angle: float, unit: str = 'deg', resample: str = 'bicubic', expand: bool = False, center: tuple = None, translate: tuple = None, bg_color: tuple = None)[source]#
-

Rotates the specified clip by angle degrees (or radians) anticlockwise. The behaviour differs when the angle is not a multiple of 90 (degrees) or when center, translate, and bg_color are not None. For info on the parameters, please see vfx.Rotate.

-
- -
-
-save_frame(filename, t=0, with_mask=True)[source]#
-

Save a clip’s frame to an image file.

-

Saves the frame of clip corresponding to time t in filename. -t can be expressed in seconds (15.35), in (min, sec), -in (hour, min, sec), or as a string: ‘01:03:05.35’.

-
-
Parameters:
-
    -
  • filename (str) – Name of the file in which the frame will be stored.

  • -
  • t (float or tuple or str, optional) – Moment of the frame to be saved. As default, the first frame will be -saved.

  • -
  • with_mask (bool, optional) – If is True the mask is saved in the alpha layer of the picture -(only works with PNGs).

  • -
-
-
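For instance, reusing the clip from the preview example above (the output file name is hypothetical):

>>> clip.save_frame("frame_at_2s.png", t=2)  # saves the frame at t=2 s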
-
- -
-
-show(t=0, with_mask=True)[source]#
-

Splashes the frame of clip corresponding to time t.

-
-
Parameters:

  • t (float or tuple or str, optional) – Time in seconds of the frame to display.

  • with_mask (bool, optional) – False if the clip has a mask but you want to see the clip without the mask.
-
-
-

Examples

-
>>> from moviepy import *
>>>
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> clip.show(t=4)
-
-
-
- -
-
-to_ImageClip(t=0, with_mask=True, duration=None)[source]#
-

Returns an ImageClip made out of the clip’s frame at time t, -which can be expressed in seconds (15.35), in (min, sec), -in (hour, min, sec), or as a string: ‘01:03:05.35’.

-
- -
-
-to_RGB()[source]#
-

Return a non-mask video clip made from the mask video clip.

-
- -
-
-to_mask(canal=0)[source]#
-

Return a mask video clip made from the clip.

-
- -
-
-property w#
-

Returns the width of the video.

-
- -
-
-with_add_mask()[source]#
-

Add a mask VideoClip to the VideoClip.

-

Returns a copy of the clip with a completely opaque mask (made of ones). This makes computations slower compared to having a None mask but can be useful in many cases.

-

Set constant_size to False for clips with moving -image size.

-
- -
-
-with_audio(audioclip)[source]#
-

Attach an AudioClip to the VideoClip.

-

Returns a copy of the VideoClip instance, with the audio -attribute set to audio, which must be an AudioClip instance.

-
- -
-
-with_layer(layer)[source]#
-

Set the clip’s layer in compositions. Clips with a greater layer -attribute will be displayed on top of others.

-

Note: Only has effect when the clip is used in a CompositeVideoClip.

-
- -
-
-with_make_frame(mf)[source]#
-

Change the clip’s get_frame.

-

Returns a copy of the VideoClip instance, with the make_frame -attribute set to mf.

-
- -
-
-with_mask(mask)[source]#
-

Set the clip’s mask.

-

Returns a copy of the VideoClip with the mask attribute set to -mask, which must be a greyscale (values in 0-1) VideoClip.

-
- -
-
-with_on_color(size=None, color=(0, 0, 0), pos=None, col_opacity=None)[source]#
-

Place the clip on a colored background.

-

Returns a clip made of the current clip overlaid on a color -clip of a possibly bigger size. Can serve to flatten transparent -clips.

-
-
Parameters:
-
    -
  • size – Size (width, height) in pixels of the final clip. -By default it will be the size of the current clip.

  • -
  • color – Background color of the final clip ([R,G,B]).

  • -
  • pos – Position of the clip in the final clip. ‘center’ is the default

  • -
  • col_opacity – Parameter in 0..1 indicating the opacity of the colored -background.

  • -
-
-
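For instance, a sketch that flattens a transparent clip onto black:

>>> flat = clip.with_on_color(size=(1280, 720), color=(0, 0, 0), pos="center")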
-
- -
-
-with_opacity(opacity)[source]#
-

Set the opacity/transparency level of the clip.

-

Returns a semi-transparent copy of the clip where the mask is multiplied by opacity (any float, normally between 0 and 1).

-
- -
-
-with_position(pos, relative=False)[source]#
-

Set the clip’s position in compositions.

-

Sets the position that the clip will have when included -in compositions. The argument pos can be either a couple -(x,y) or a function t-> (x,y). x and y mark the -location of the top left corner of the clip, and can be -of several types.

-

Examples

-
>>> clip.with_position((45,150)) # x=45, y=150
>>>
>>> # clip horizontally centered, at the top of the picture
>>> clip.with_position(("center","top"))
>>>
>>> # clip is at 40% of the width, 70% of the height:
>>> clip.with_position((0.4,0.7), relative=True)
>>>
>>> # clip's position is horizontally centered, and moving up !
>>> clip.with_position(lambda t: ('center', 50+t) )
-
-
-
- -
-
-with_sub_effects(effects: List[Effect], start_time=0, end_time=None, **kwargs)[source]#
-

Apply a transformation to a part of the clip.

-

Returns a new clip in which the given effects have been applied to the subclip between times start_time and end_time (in seconds).

-

Examples

-
>>> # The scene between times t=3s and t=6s in ``clip`` will
>>> # be played twice slower in ``new_clip``
>>> new_clip = clip.with_sub_effects([MultiplySpeed(0.5)], 3, 6)
-
-
-
- -
-
-without_audio()[source]#
-

Remove the clip’s audio.

-

Return a copy of the clip with audio set to None.

-
- -
-
-write_gif(filename, fps=None, loop=0, logger='bar')[source]#
-

Write the VideoClip to a GIF file.

-

Converts a VideoClip into an animated GIF using imageio

-
-
Parameters:
-
    -
  • filename – Name of the resulting gif file, as a string or a path-like object.

  • -
  • fps – Number of frames per second (see note below). If it isn’t provided, then the function will look for the clip’s fps attribute (VideoFileClip, for instance, has one).

  • -
  • loop (int, optional) – Repeat the clip using loop iterations in the resulting GIF.

  • -
  • logger – Either "bar" for progress bar or None or any Proglog logger.

  • -
-
-
-

Notes

-

The gif will be playing the clip in real time (you can only change the frame rate). If you want the gif to be played slower than the clip, slow the clip down first:

-
>>> # slow down clip 50% and make it a GIF
>>> myClip.multiply_speed(0.5).write_gif('myClip.gif')
-
-
-
- -
-
-write_images_sequence(name_format, fps=None, with_mask=True, logger='bar')[source]#
-

Writes the videoclip to a sequence of image files.

-
-
Parameters:
-
    -
  • name_format – A filename specifying the numbering format and extension of the pictures. For instance “frame%03d.png” for filenames indexed with 3 digits and PNG format. Also possible: “some_folder/frame%04d.jpeg”, etc.

  • -
  • fps – Number of frames per second to consider when writing the -clip. If not specified, the clip’s fps attribute will -be used if it has one.

  • -
  • with_mask – will save the clip’s mask (if any) as an alpha channel (PNGs only).

  • -
  • logger – Either "bar" for progress bar or None or any Proglog logger.

  • -
-
-
Returns:
-

A list of all the files generated.

-
-
Return type:
-

names_list

-
-
-

Notes

-

The resulting image sequence can be read using e.g. the class ImageSequenceClip.

-
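A minimal sketch (the folder and %04d pattern are illustrative; assumes the clip has an fps attribute or fps is given):

>>> files = clip.write_images_sequence("frames/frame%04d.png", fps=24)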
- -
-
-write_videofile(filename, fps=None, codec=None, bitrate=None, audio=True, audio_fps=44100, preset='medium', audio_nbytes=4, audio_codec=None, audio_bitrate=None, audio_bufsize=2000, temp_audiofile=None, temp_audiofile_path='', remove_temp=True, write_logfile=False, threads=None, ffmpeg_params=None, logger='bar', pixel_format=None)[source]#
-

Write the clip to a videofile.

-
-
Parameters:
-
    -
  • filename – Name of the video file to write in, as a string or a path-like object. -The extension must correspond to the “codec” used (see below), -or simply be ‘.avi’ (which will work with any codec).

  • -
  • fps – Number of frames per second in the resulting video file. If None is -provided, and the clip has an fps attribute, this fps will be used.

  • -
  • codec

    Codec to use for image encoding. Can be any codec supported by ffmpeg. If the filename has extension ‘.mp4’, ‘.ogv’ or ‘.webm’, the codec will be set accordingly, but you can still set it if you don’t like the default. For other extensions, the output filename must be set accordingly.

    Some examples of codecs are:

    • 'libx264' (default codec for file extension .mp4) makes well-compressed videos (quality tunable using ‘bitrate’).

    • 'mpeg4' (other codec for extension .mp4) can be an alternative to 'libx264', and produces higher quality videos by default.

    • 'rawvideo' (use file extension .avi) will produce a video of perfect quality, of possibly very large size.

    • 'png' (use file extension .avi) will produce a video of perfect quality, of smaller size than with rawvideo.

    • 'libvorbis' (use file extension .ogv) is a nice video format, completely free/open source. However, not everyone has the codecs installed by default on their machine.

    • 'libvpx' (use file extension .webm) is a tiny video format well suited for web videos (with HTML5). Open source.

  • audio – Either True, False, or a file name. -If True and the clip has an audio clip attached, this -audio clip will be incorporated as a soundtrack in the movie. -If audio is the name of an audio file, this audio file -will be incorporated as a soundtrack in the movie.

  • -
  • audio_fps – frame rate to use when generating the sound.

  • -
  • temp_audiofile – the name of the temporary audiofile, as a string or path-like object, -to be created and then used to write the complete video, if any.

  • -
  • temp_audiofile_path – the location where the temporary audiofile is placed, as a string or path-like object. Defaults to the current working directory.

  • -
  • audio_codec – Which audio codec should be used. Examples are ‘libmp3lame’ for ‘.mp3’, ‘libvorbis’ for ‘.ogg’, ‘libfdk_aac’ for ‘.m4a’, ‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav. Default is ‘libmp3lame’, unless the video extension is ‘.ogv’ or ‘.webm’, in which case the default is ‘libvorbis’.

  • -
  • audio_bitrate – Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. Will determine the size/quality of audio in the output file. Note that it is mainly an indicative goal; the bitrate won’t necessarily be reached in the final file.

  • -
  • preset – Sets the time that FFMPEG will spend optimizing the compression. -Choices are: ultrafast, superfast, veryfast, faster, fast, medium, -slow, slower, veryslow, placebo. Note that this does not impact -the quality of the video, only the size of the video file. So -choose ultrafast when you are in a hurry and file size does not -matter.

  • -
  • threads – Number of threads to use for ffmpeg. Can speed up the writing of -the video on multicore computers.

  • -
  • ffmpeg_params – Any additional ffmpeg parameters you would like to pass, as a list -of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’].

  • -
  • write_logfile – If true, will write log files for the audio and the video. -These will be files ending with ‘.log’ with the name of the -output file in them.

  • -
  • logger – Either "bar" for progress bar or None or any Proglog logger.

  • -
  • pixel_format – Pixel format for the output video file.

  • -
-
-
-

Examples

-
>>> from moviepy import VideoFileClip
>>> clip = VideoFileClip("myvideo.mp4").with_subclip(100, 120)
>>> clip.write_videofile("my_new_video.mp4")
>>> clip.close()
diff --git a/reference/reference/moviepy.video.VideoClip.html b/reference/reference/moviepy.video.VideoClip.html
deleted file mode 100644

moviepy.video.VideoClip#

-

Implements VideoClip (base class for video clips) and its main subclasses:

-
  • Animated clips: VideoFileClip, ImageSequenceClip, BitmapClip

  • Static image clips: ImageClip, ColorClip, TextClip

Classes

BitmapClip(bitmap_frames, *[, fps, ...])
    Clip made of color bitmaps.

ColorClip(size[, color, is_mask, duration])
    An ImageClip showing just one color.

DataVideoClip(data, data_to_frame, fps[, ...])
    Class of video clips whose successive frames are functions of successive datasets.

ImageClip(img[, is_mask, transparent, ...])
    Class for non-moving VideoClips.

TextClip(font[, text, filename, font_size, ...])
    Class for autogenerated text clips.

UpdatedVideoClip(world[, is_mask, duration])
    Class of clips whose make_frame requires some objects to be updated.

VideoClip([make_frame, is_mask, duration, ...])
    Base class for video clips.

diff --git a/reference/reference/moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip.html b/reference/reference/moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip.html
deleted file mode 100644

moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip#

-
-
-class moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip(clips, size=None, bg_color=None, use_bgclip=False, is_mask=False)[source]#
-

A VideoClip made of other videoclips displayed together. This is the base class for most compositions.

-
-
Parameters:
-
  • size – The size (width, height) of the final clip.

  • clips – A list of videoclips.

    Clips with a higher layer attribute will be displayed on top of other clips in a lower layer. If two or more clips share the same layer, then the one appearing latest in clips will be displayed on top (i.e. it has the higher layer).

    For each clip:

    • The attribute pos determines where the clip is placed. See VideoClip.set_pos.

    • The mask of the clip determines which parts are visible.

    Finally, if all the clips in the list have their duration attribute set, then the duration of the composite video clip is computed automatically.

  • bg_color – Color for the unmasked and unfilled regions. Set to None for these regions to be transparent (will be slower).

  • use_bgclip – Set to True if the first clip in the list should be used as the ‘background’ on which all other clips are blitted. That first clip must have the same size as the final clip. If it has no transparency, the final clip will have no mask.

The clip with the highest FPS will be the FPS of the composite clip.
-
-
-
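A minimal composition sketch (file names are illustrative; the first clip is used as the background):

>>> from moviepy import VideoFileClip, CompositeVideoClip
>>> bg = VideoFileClip("background.mp4")
>>> fg = VideoFileClip("overlay.mp4").with_position(("center", "top"))
>>> final = CompositeVideoClip([bg, fg], use_bgclip=True)
>>> final.write_videofile("composed.mp4")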
-
-close()[source]#
-

Closes the instance, releasing all the resources.

-
- -
-
-make_frame(t)[source]#
-

The clips playing at time t are blitted over one another.

-
- -
-
-playing_clips(t=0)[source]#
-

Returns a list of the clips in the composite clip that are actually playing at the given time t.

diff --git a/reference/reference/moviepy.video.compositing.CompositeVideoClip.clips_array.html b/reference/reference/moviepy.video.compositing.CompositeVideoClip.clips_array.html
deleted file mode 100644

moviepy.video.compositing.CompositeVideoClip.clips_array#

-
-
-moviepy.video.compositing.CompositeVideoClip.clips_array(array, rows_widths=None, cols_heights=None, bg_color=None)[source]#
-

Given a matrix whose rows are clips, creates a CompositeVideoClip in which the clips of each row are placed side by side horizontally, and the rows are stacked one on top of the other. So, given the following matrix of same-sized clips:

-

clips_array([[clip1, clip2, clip3],
             [clip4, clip5, clip6]])

-

the result will be a CompositeVideoClip with a layout displayed like:

-

┏━━━━━━━┳━━━━━━━┳━━━━━━━┓
┃ clip1 ┃ clip2 ┃ clip3 ┃
┣━━━━━━━╋━━━━━━━╋━━━━━━━┫
┃ clip4 ┃ clip5 ┃ clip6 ┃
┗━━━━━━━┻━━━━━━━┻━━━━━━━┛

-

If some clips don’t fill the space required by the rows or columns in which they are placed, that space will be filled by the color defined in bg_color.

-
-
array
    Matrix of clips included in the returned composited video clip.

rows_widths
    Widths of the different rows in pixels. If None, is set automatically.

cols_heights
    Heights of the different columns in pixels. If None, is set automatically.

bg_color
    Fill color for the masked and unfilled regions. Set to None for these regions to be transparent (processing will be slower).

-
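A minimal grid sketch (assuming four same-sized clips; the names and the top-level import are illustrative):

>>> from moviepy import clips_array
>>> grid = clips_array([[clip1, clip2], [clip3, clip4]])
>>> grid.write_videofile("grid.mp4")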
diff --git a/reference/reference/moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips.html b/reference/reference/moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips.html
deleted file mode 100644

moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips#

-
-
-moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips(clips, method='chain', transition=None, bg_color=None, is_mask=False, padding=0)[source]#
-

Concatenates several video clips.

-

Returns a video clip made by concatenating several video clips (concatenated means that they will be played one after another).

-

There are two methods:

-
    -
  • method=”chain”: will produce a clip that simply outputs the frames of the successive clips, without any correction if they are not of the same size or anything. If none of the clips have masks, the resulting clip has no mask, else the mask is a concatenation of masks (using completely opaque masks for clips that don’t have any). If you have clips of different sizes and you want to write the result of the concatenation directly to a file, use the method “compose” instead.

  • method=”compose”: if the clips do not have the same resolution, the final resolution will be such that no clip has to be resized. As a consequence, the final clip has the height of the highest clip and the width of the widest clip of the list. All the clips with smaller dimensions will appear centered. The border will be transparent if mask=True, else it will be of the color specified by bg_color.

  • -
-

The clip with the highest FPS will be the FPS of the result clip.

-
-
Parameters:
-
    -
  • clips – A list of video clips which must all have their duration -attributes set.

  • -
  • method – “chain” or “compose”: see above.

  • -
  • transition – A clip that will be played between each two clips of the list.

  • -
  • bg_color – Only for method=’compose’. Color of the background. Set to None for a transparent clip.

  • -
  • padding – Only for method=’compose’. Duration between two consecutive clips. Note that for negative padding, a clip will partly play at the same time as the clip it follows (negative padding is useful for clips that fade into one another). A non-null padding automatically sets the method to ’compose’.

  • -
-
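A minimal sketch (file names are illustrative; method="compose" handles clips of different sizes):

>>> from moviepy import VideoFileClip, concatenate_videoclips
>>> clips = [VideoFileClip("a.mp4"), VideoFileClip("b.mp4")]
>>> final = concatenate_videoclips(clips, method="compose")
>>> final.write_videofile("joined.mp4")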
diff --git a/reference/reference/moviepy.video.compositing.CompositeVideoClip.html b/reference/reference/moviepy.video.compositing.CompositeVideoClip.html
deleted file mode 100644

moviepy.video.compositing.CompositeVideoClip#

-

Main video composition interface of MoviePy.

-

Classes

CompositeVideoClip(clips[, size, bg_color, ...])
    A VideoClip made of other videoclips displayed together.

Functions

clips_array(array[, rows_widths, ...])
    Given a matrix whose rows are clips, creates a CompositeVideoClip in which the clips of each row are placed side by side and the rows are stacked on top of one another.

concatenate_videoclips(clips[, method, ...])
    Concatenates several video clips.
- - -
diff --git a/reference/reference/moviepy.video.compositing.html b/reference/reference/moviepy.video.compositing.html
deleted file mode 100644

diff --git a/reference/reference/moviepy.video.fx.AccelDecel.html b/reference/reference/moviepy.video.fx.AccelDecel.html
deleted file mode 100644

moviepy.video.fx.AccelDecel#

-
-
-class moviepy.video.fx.AccelDecel.AccelDecel(new_duration: float = None, abruptness: float = 1.0, soonness: float = 1.0)[source]#
-

Accelerates and decelerates a clip, useful for GIF making.

-
-
Parameters:
-
    -
  • new_duration (float) – Duration for the new transformed clip. If None, will be that of the -current clip.

  • -
  • abruptness (float) –

    Slope shape in the acceleration-deceleration function. It will depend -on the value of the parameter:

    -
      -
    • -1 < abruptness < 0: speed up, down, up.

    • -
    • abruptness == 0: no effect.

    • -
    • abruptness > 0: speed down, up, down.

    • -
    -

  • -
  • soonness (float) – For positive abruptness, determines how soon the transformation occurs. -Should be a positive number.

  • -
-
-
Raises:
-

ValueError – When the soonness argument is lower than 0.

-
-
-

Examples

-

The following graphs show the speed curves generated by different combinations of arguments; the slope of each curve represents the playback speed of the generated video, and the linear function (in red) corresponds to a combination that produces no transformation.

[Figure: accel_decel FX parameter combinations]
-
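A minimal usage sketch (clip is illustrative; the effect is applied through with_effects, as in the other examples of this reference):

>>> from moviepy import vfx
>>> warped = clip.with_effects([vfx.AccelDecel(abruptness=1.0, soonness=1.0)])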
-apply(clip)[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.BlackAndWhite.html b/reference/reference/moviepy.video.fx.BlackAndWhite.html
deleted file mode 100644

moviepy.video.fx.BlackAndWhite#

-
-
-class moviepy.video.fx.BlackAndWhite.BlackAndWhite(RGB: str = None, preserve_luminosity: bool = True)[source]#
-

Desaturates the picture, making it black and white. The parameter RGB allows setting weights for the different color channels. If RGB is ‘CRT_phosphor’, a special set of values is used. preserve_luminosity maintains the sum of the RGB weights at 1.

-
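A minimal sketch (clip is illustrative; assumes the vfx namespace is importable from the top-level package, as in the MasksAnd example):

>>> from moviepy import vfx
>>> bw_clip = clip.with_effects([vfx.BlackAndWhite(preserve_luminosity=True)])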
-
-apply(clip)[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.Blink.html b/reference/reference/moviepy.video.fx.Blink.html
deleted file mode 100644

diff --git a/reference/reference/moviepy.video.fx.Crop.html b/reference/reference/moviepy.video.fx.Crop.html
deleted file mode 100644

moviepy.video.fx.Crop#

-
-
-class moviepy.video.fx.Crop.Crop(x1: int = None, y1: int = None, x2: int = None, y2: int = None, width: int = None, height: int = None, x_center: int = None, y_center: int = None)[source]#
-

Effect to crop a clip to get a new clip in which just a rectangular subregion of the original clip is conserved. x1,y1 indicates the top left corner and x2,y2 is the lower right corner of the cropped region. All coordinates are in pixels. Float numbers are accepted.

-

To crop an arbitrary rectangle:

-
>>> Crop(x1=50, y1=60, x2=460, y2=275)
-
-
-

Only remove the part above y=30:

-
>>> Crop(y1=30)
-
-
-

Crop a rectangle that starts 10 pixels from the left and is 200px wide:

-
>>> Crop(x1=10, width=200)
-
-
-

Crop a rectangle centered in x,y=(300,400), width=50, height=150 :

-
>>> Crop(x_center=300, y_center=400, width=50, height=150)
-
-
-

Any combination of the above should work, like for this rectangle -centered in x=300, with explicit y-boundaries:

-
>>> Crop(x_center=300, width=400, y1=100, y2=600)
-
-
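To actually apply one of the crops above, a sketch using the with_effects API shown elsewhere in this reference (clip is illustrative):

>>> from moviepy import vfx
>>> cropped = clip.with_effects([vfx.Crop(x1=50, y1=60, x2=460, y2=275)])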
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.CrossFadeIn.html b/reference/reference/moviepy.video.fx.CrossFadeIn.html
deleted file mode 100644

moviepy.video.fx.CrossFadeIn#

-
-
-class moviepy.video.fx.CrossFadeIn.CrossFadeIn(duration: float)[source]#
-

Makes the clip appear progressively, over duration seconds. Only works when the clip is included in a CompositeVideoClip.

-
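A minimal cross-fade sketch (clip names are illustrative; the second clip fades in over one second inside a CompositeVideoClip):

>>> from moviepy import CompositeVideoClip, vfx
>>> fading = clip2.with_effects([vfx.CrossFadeIn(1.0)])
>>> final = CompositeVideoClip([clip1, fading])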
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.CrossFadeOut.html b/reference/reference/moviepy.video.fx.CrossFadeOut.html
deleted file mode 100644

moviepy.video.fx.CrossFadeOut#

-
-
-class moviepy.video.fx.CrossFadeOut.CrossFadeOut(duration: float)[source]#
-

Makes the clip disappear progressively, over duration seconds. Only works when the clip is included in a CompositeVideoClip.

-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.EvenSize.html b/reference/reference/moviepy.video.fx.EvenSize.html
deleted file mode 100644

moviepy.video.fx.EvenSize#

-
-
-class moviepy.video.fx.EvenSize.EvenSize[source]#
-

Crops the clip to make dimensions even.

-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.FadeIn.html b/reference/reference/moviepy.video.fx.FadeIn.html
deleted file mode 100644

moviepy.video.fx.FadeIn#

-
-
-class moviepy.video.fx.FadeIn.FadeIn(duration: float, initial_color: list = None)[source]#
-

Makes the clip progressively appear from some color (black by default), over duration seconds at the beginning of the clip. Can be used for masks too, where the initial color must be a number between 0 and 1.

For cross-fading (progressive appearance or disappearance of a clip over another clip), see CrossFadeIn.

-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.FadeOut.html b/reference/reference/moviepy.video.fx.FadeOut.html
deleted file mode 100644

moviepy.video.fx.FadeOut#

-
-
-class moviepy.video.fx.FadeOut.FadeOut(duration: float, final_color: list = None)[source]#
-

Makes the clip progressively fade to some color (black by default), over duration seconds at the end of the clip. Can be used for masks too, where the final color must be a number between 0 and 1.

For cross-fading (progressive appearance or disappearance of a clip over another clip), see CrossFadeOut.

-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.Freeze.html b/reference/reference/moviepy.video.fx.Freeze.html
deleted file mode 100644

moviepy.video.fx.Freeze#

-
-
-class moviepy.video.fx.Freeze.Freeze(t: float = 0, freeze_duration: float = None, total_duration: float = None, padding_end: float = 0)[source]#
-

Momentarily freeze the clip at time t.

-

Set t=’end’ to freeze the clip at the end (actually it will freeze on the frame at time clip.duration - padding_end seconds - 1 / clip_fps). With freeze_duration you can specify the duration of the freeze. With total_duration you can specify the total duration of the clip and the freeze (i.e. the duration of the freeze is automatically computed). One of them must be provided.

-
-
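A minimal sketch (clip is illustrative; freezes the last frame for two extra seconds):

>>> from moviepy import vfx
>>> frozen = clip.with_effects([vfx.Freeze(t='end', freeze_duration=2)])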
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.FreezeRegion.html b/reference/reference/moviepy.video.fx.FreezeRegion.html
deleted file mode 100644

moviepy.video.fx.FreezeRegion#

-
-
-class moviepy.video.fx.FreezeRegion.FreezeRegion(t: float = 0, region: tuple = None, outside_region: tuple = None, mask: Clip = None)[source]#
-

Freezes one region of the clip while the rest remains animated.

-

You can choose one of three methods by providing either region, outside_region, or mask.

-
-
Parameters:
-
    -
  • t (float) – Time at which to freeze the region.

  • region (tuple) – A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels) which will be frozen. You can provide outside_region or mask instead.

  • outside_region (tuple) – A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels) which will be the only non-frozen region.

  • mask (moviepy.Clip.Clip) – If not None, will overlay a frozen version of the clip on the current clip, with the provided mask. In other words, the “visible” pixels in the mask indicate the frozen region in the final picture.

  • -
-
-
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.GammaCorrection.html b/reference/reference/moviepy.video.fx.GammaCorrection.html
deleted file mode 100644

moviepy.video.fx.GammaCorrection#

-
-
-class moviepy.video.fx.GammaCorrection.GammaCorrection(gamma: float)[source]#
-

Gamma-correction of a video clip.

-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.HeadBlur.html b/reference/reference/moviepy.video.fx.HeadBlur.html
deleted file mode 100644

moviepy.video.fx.HeadBlur#

-
-
-class moviepy.video.fx.HeadBlur.HeadBlur(fx: callable, fy: callable, radius: float, intensity: float = None)[source]#
-

Returns a filter that will blur a moving part (e.g. a head) of the frames.

The position of the blur at time t is defined by (fx(t), fy(t)), the radius of the blurring by radius, and the intensity of the blurring by intensity.

-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.InvertColors.html b/reference/reference/moviepy.video.fx.InvertColors.html
deleted file mode 100644

moviepy.video.fx.InvertColors#

-
-
-class moviepy.video.fx.InvertColors.InvertColors[source]#
-

Returns the color-inverted clip.

The values of all pixels are replaced with (255-v), or (1-v) for masks. Black becomes white, green becomes purple, etc.

-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.Loop.html b/reference/reference/moviepy.video.fx.Loop.html
deleted file mode 100644

moviepy.video.fx.Loop#

-
-
-class moviepy.video.fx.Loop.Loop(n: int = None, duration: float = None)[source]#
-

Returns a clip that plays the current clip in an infinite loop. Ideal for clips coming from GIFs.

-
-
Parameters:
-
    -
  • n (int) – Number of times the clip should be played. If None, the clip will loop indefinitely (i.e. with no set duration).

  • -
  • duration (float) – Total duration of the clip. Can be specified instead of n.

  • -
-
-
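A minimal sketch (clip is illustrative; loops a short clip up to one minute):

>>> from moviepy import vfx
>>> looped = clip.with_effects([vfx.Loop(duration=60)])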
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.LumContrast.html b/reference/reference/moviepy.video.fx.LumContrast.html
deleted file mode 100644

moviepy.video.fx.LumContrast#

-
-
-class moviepy.video.fx.LumContrast.LumContrast(lum: float = 0, contrast: float = 0, contrast_threshold: float = 127)[source]#
-

Luminosity-contrast correction of a clip.

-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.MakeLoopable.html b/reference/reference/moviepy.video.fx.MakeLoopable.html
deleted file mode 100644

moviepy.video.fx.MakeLoopable#

-
-
-class moviepy.video.fx.MakeLoopable.MakeLoopable(overlap_duration: float)[source]#
-

Makes the clip fade in progressively at its own end, so that it can be looped indefinitely.

-
-
Parameters:
-

overlap_duration (float) – Duration of the fade-in (in seconds).

-
-
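A minimal sketch (clip is illustrative; one second of overlap):

>>> from moviepy import vfx
>>> loopable = clip.with_effects([vfx.MakeLoopable(overlap_duration=1)])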
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.Margin.html b/reference/reference/moviepy.video.fx.Margin.html
deleted file mode 100644

moviepy.video.fx.Margin#

-
-
-class moviepy.video.fx.Margin.Margin(margin_size: int = None, left: int = 0, right: int = 0, top: int = 0, bottom: int = 0, color: tuple = (0, 0, 0), opacity: float = 1.0)[source]#
-

Draws an external margin all around the frame.

-
-
Parameters:
-
    -
  • margin_size (int, optional) – If not None, then the new clip has a margin size of -size margin_size in pixels on the left, right, top, and bottom.

  • -
  • left (int, optional) – If margin_size=None, margin size for the new clip in left direction.

  • -
  • right (int, optional) – If margin_size=None, margin size for the new clip in right direction.

  • -
  • top (int, optional) – If margin_size=None, margin size for the new clip in top direction.

  • -
  • bottom (int, optional) – If margin_size=None, margin size for the new clip in bottom direction.

  • -
  • color (tuple, optional) – Color of the margin.

  • -
  • opacity (float, optional) – Opacity of the margin. Setting this value to 0 yields transparent margins.

  • -
-
-
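A minimal sketch (clip is illustrative; a 10-pixel red margin on all sides):

>>> from moviepy import vfx
>>> framed = clip.with_effects([vfx.Margin(margin_size=10, color=(255, 0, 0))])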
-
-
-add_margin(clip: Clip)[source]#
-

Add margins to the clip.

-
- -
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.MaskColor.html b/reference/reference/moviepy.video.fx.MaskColor.html
deleted file mode 100644

moviepy.video.fx.MaskColor#

-
-
-class moviepy.video.fx.MaskColor.MaskColor(color: tuple = (0, 0, 0), threshold: float = 0, stiffness: float = 1)[source]#
-

Returns a new clip with a mask for transparency where the original clip is of the given color.

-

You can also have a “progressive” mask by specifying a non-null distance threshold. In this case, if the distance between a pixel and the given color is d, the transparency will be

d**stiffness / (threshold**stiffness + d**stiffness)

which is 1 when d >> threshold and 0 when d << threshold; the stiffness of the effect is parametrized by stiffness.

-
-
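A minimal green-screen sketch (clip and the threshold/stiffness values are illustrative):

>>> from moviepy import vfx
>>> keyed = clip.with_effects([vfx.MaskColor(color=(0, 255, 0), threshold=100, stiffness=5)])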
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.MasksAnd.html b/reference/reference/moviepy.video.fx.MasksAnd.html
deleted file mode 100644

moviepy.video.fx.MasksAnd#

-
-
-class moviepy.video.fx.MasksAnd.MasksAnd(other_clip: Clip | ndarray)[source]#
-

Returns the logical ‘and’ (minimum pixel color values) between two masks.

-

The result has the duration of the clip to which it has been applied, if it has any.

-
-
Parameters:
-

other_clip (ImageClip or np.ndarray) – Clip used to mask the original clip.

-
-
-

Examples

-
>>> clip = ColorClip(color=(255, 0, 0), size=(1, 1))      # red
>>> mask = ColorClip(color=(0, 255, 0), size=(1, 1))      # green
>>> masked_clip = clip.with_effects([vfx.MasksAnd(mask)]) # black
>>> masked_clip.get_frame(0)
[[[0 0 0]]]
-
-
-
-
-apply(clip: Clip) Clip[source]#
-

Apply the effect to the clip.

-
- -
-
-copy()#
-

Return a shallow copy of an Effect.

-

You must always copy an Effect before applying it, because some effects modify their own attributes when applied (for example, setting a previously unset property from the target clip’s properties).

If we were to use the original effect, calling the same effect multiple times could lead to different properties, and different results for equivalent clips.

By using copy, we ensure we can use the same effect object multiple times while maintaining the same behavior/result.

In a way, copy makes the effect itself effectively idempotent.

diff --git a/reference/reference/moviepy.video.fx.MasksOr.html b/reference/reference/moviepy.video.fx.MasksOr.html
deleted file mode 100644

moviepy.video.fx.MasksOr#

class moviepy.video.fx.MasksOr.MasksOr(other_clip: Clip | ndarray)[source]#

Returns the logical 'or' (maximum pixel color values) between two masks.

The result has the duration of the clip to which the effect is applied, if it has one.

Parameters:
• other_clip (ImageClip or np.ndarray) – Clip used to mask the original clip.

Examples

>>> clip = ColorClip(color=(255, 0, 0), size=(1, 1))     # red
>>> mask = ColorClip(color=(0, 255, 0), size=(1, 1))     # green
>>> masked_clip = clip.with_effects([vfx.MasksOr(mask)]) # yellow
>>> masked_clip.get_frame(0)
[[[255 255   0]]]

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.

diff --git a/reference/reference/moviepy.video.fx.MirrorX.html b/reference/reference/moviepy.video.fx.MirrorX.html deleted file mode 100644 index 5b0d2fbb1..000000000

moviepy.video.fx.MirrorX#

class moviepy.video.fx.MirrorX.MirrorX(apply_to: List | str = 'mask')[source]#

Flips the clip horizontally (and its mask too, by default).

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.
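Example (a sketch; the input file is hypothetical):

>>> from moviepy import VideoFileClip, vfx
>>> clip = VideoFileClip("myHolidays.mp4")  # hypothetical input file
>>> flipped = clip.with_effects([vfx.MirrorX()])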

diff --git a/reference/reference/moviepy.video.fx.MirrorY.html b/reference/reference/moviepy.video.fx.MirrorY.html deleted file mode 100644 index c3b7f2968..000000000

moviepy.video.fx.MirrorY#

class moviepy.video.fx.MirrorY.MirrorY(apply_to: List | str = 'mask')[source]#

Flips the clip vertically (and its mask too, by default).

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.

diff --git a/reference/reference/moviepy.video.fx.MultiplyColor.html b/reference/reference/moviepy.video.fx.MultiplyColor.html deleted file mode 100644 index b01a6265d..000000000

moviepy.video.fx.MultiplyColor#

class moviepy.video.fx.MultiplyColor.MultiplyColor(factor: float)[source]#

Multiplies the clip's colors by the given factor; this can be used to decrease or increase the clip's brightness.

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.
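Example (a sketch; the file and factor values are illustrative):

>>> from moviepy import VideoFileClip, vfx
>>> clip = VideoFileClip("myHolidays.mp4")  # hypothetical input file
>>> brighter = clip.with_effects([vfx.MultiplyColor(1.5)])  # 50% brighter
>>> darker = clip.with_effects([vfx.MultiplyColor(0.5)])    # colors halved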

diff --git a/reference/reference/moviepy.video.fx.MultiplySpeed.html b/reference/reference/moviepy.video.fx.MultiplySpeed.html deleted file mode 100644 index 2875ff64b..000000000

moviepy.video.fx.MultiplySpeed#

class moviepy.video.fx.MultiplySpeed.MultiplySpeed(factor: float = None, final_duration: float = None)[source]#

Returns a clip playing the current clip but at a speed multiplied by factor.

Instead of factor, one can indicate the desired final_duration of the clip, and the factor will be computed automatically. The same effect is applied to the clip's audio and mask if any.

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.
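Example showing both ways of parametrizing the effect (a sketch; the file and values are illustrative):

>>> from moviepy import VideoFileClip, vfx
>>> clip = VideoFileClip("myHolidays.mp4")  # hypothetical input file
>>> double_speed = clip.with_effects([vfx.MultiplySpeed(factor=2)])
>>> ten_seconds = clip.with_effects([vfx.MultiplySpeed(final_duration=10)])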

diff --git a/reference/reference/moviepy.video.fx.Painting.html b/reference/reference/moviepy.video.fx.Painting.html deleted file mode 100644 index e21426944..000000000

moviepy.video.fx.Painting#

class moviepy.video.fx.Painting.Painting(saturation: float = 1.4, black: float = 0.006)[source]#

Transforms any photo into some kind of painting. saturation controls how flashy the colors of the result should be; black gives the amount of black lines wanted.

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.

to_painting(np_image, saturation=1.4, black=0.006)[source]#
Transforms a numpy image (np_image) into some kind of painting, with the same saturation and black parameters as above.
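Example (a sketch; the file and parameter values are illustrative):

>>> from moviepy import VideoFileClip, vfx
>>> clip = VideoFileClip("myHolidays.mp4")  # hypothetical input file
>>> painted = clip.with_effects([vfx.Painting(saturation=1.6, black=0.01)])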

diff --git a/reference/reference/moviepy.video.fx.Resize.html b/reference/reference/moviepy.video.fx.Resize.html deleted file mode 100644 index b254fe265..000000000

moviepy.video.fx.Resize#

class moviepy.video.fx.Resize.Resize(new_size: tuple | float | callable = None, height: int = None, width: int = None, apply_to_mask: bool = True)[source]#

Effect returning a video clip that is a resized version of the clip.

Parameters:
• new_size (tuple or float or function, optional) – Can be either: (width, height) in pixels; a scaling factor, like 0.5; or a function of time returning one of these.
• height (int, optional) – Height of the new clip in pixels. The width is then computed so that the width/height ratio is conserved.
• width (int, optional) – Width of the new clip in pixels. The height is then computed so that the width/height ratio is conserved.

Examples

>>> myClip.with_effects([vfx.Resize((460,720))]) # New resolution: (460,720)
>>> myClip.with_effects([vfx.Resize(0.6)]) # width and height multiplied by 0.6
>>> myClip.with_effects([vfx.Resize(width=800)]) # height computed automatically.
>>> myClip.with_effects([vfx.Resize(lambda t : 1+0.02*t)]) # slow clip swelling

apply(clip)[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.

resizer(pic, new_size)[source]#
Resize the image using PIL.

diff --git a/reference/reference/moviepy.video.fx.Rotate.html b/reference/reference/moviepy.video.fx.Rotate.html deleted file mode 100644 index 5f03a8b74..000000000

moviepy.video.fx.Rotate#

class moviepy.video.fx.Rotate.Rotate(angle: float, unit: str = 'deg', resample: str = 'bicubic', expand: bool = True, center: tuple = None, translate: tuple = None, bg_color: tuple = None)[source]#

Rotates the specified clip by angle degrees (or radians) anticlockwise. If the angle is not a multiple of 90 (degrees), or if center, translate, or bg_color is not None, there will be black borders. You can make them transparent with:

>>> new_clip = clip.with_add_mask().with_effects([vfx.Rotate(72)])

Parameters:
• clip (VideoClip) – A video clip.
• angle (float) – Either a value or a function angle(t) representing the angle of rotation.
• unit (str, optional) – Unit of parameter angle (either "deg" for degrees or "rad" for radians).
• resample (str, optional) – An optional resampling filter. One of "nearest", "bilinear" or "bicubic".
• expand (bool, optional) – If true, expands the output image to make it large enough to hold the entire rotated image. If false or omitted, makes the output image the same size as the input image.
• translate (tuple, optional) – An optional post-rotate translation (a 2-tuple).
• center (tuple, optional) – Optional center of rotation (a 2-tuple). Origin is the upper left corner.
• bg_color (tuple, optional) – An optional color for the area outside the rotated image. Only has effect if expand is true.

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.

diff --git a/reference/reference/moviepy.video.fx.Scroll.html b/reference/reference/moviepy.video.fx.Scroll.html deleted file mode 100644 index ce5a4b580..000000000

moviepy.video.fx.Scroll#

class moviepy.video.fx.Scroll.Scroll(w=None, h=None, x_speed=0, y_speed=0, x_start=0, y_start=0, apply_to='mask')[source]#

Effect that scrolls a clip horizontally or vertically, e.g. to make end credits.

Parameters:
• w – Width of the final clip. Defaults to clip.w.
• h – Height of the final clip. Defaults to clip.h.
• x_speed – Speed of the scroll in the x direction.
• y_speed – Speed of the scroll in the y direction.
• x_start – Starting position of the scroll in the x direction.
• y_start – Starting position of the scroll in the y direction.

apply_to
Whether to apply the effect to the mask too.

apply(clip)[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.
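Example of scrolling end credits (a sketch; the credits image is hypothetical):

>>> from moviepy import ImageClip, vfx
>>> credits = ImageClip("credits.png", duration=10)  # hypothetical tall image
>>> scrolled = credits.with_effects(
...     [vfx.Scroll(w=credits.w, h=400, y_speed=50)]  # 400px window, 50 px/s
... )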

diff --git a/reference/reference/moviepy.video.fx.SlideIn.html b/reference/reference/moviepy.video.fx.SlideIn.html deleted file mode 100644 index 0e29bb4ba..000000000

moviepy.video.fx.SlideIn#

class moviepy.video.fx.SlideIn.SlideIn(duration: float, side: str)[source]#

Makes the clip arrive from one side of the screen.

Only works when the clip is included in a CompositeVideoClip, and if the clip has the same size as the whole composition.

Parameters:
• clip (moviepy.Clip.Clip) – A video clip.
• duration (float) – Time taken for the clip to be fully visible.
• side (str) – Side of the screen where the clip comes from. One of 'top', 'bottom', 'left' or 'right'.

Examples

>>> from moviepy import *
>>>
>>> clips = [... make a list of clips]
>>> slided_clips = [
...     CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "left")])])
...     for clip in clips
... ]
>>> final_clip = concatenate_videoclips(slided_clips, padding=-1)
>>>
>>> clip = ColorClip(
...     color=(255, 0, 0), duration=1, size=(300, 300)
... ).with_fps(60)
>>> final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideIn(1, "right")])])

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.

diff --git a/reference/reference/moviepy.video.fx.SlideOut.html b/reference/reference/moviepy.video.fx.SlideOut.html deleted file mode 100644 index b130a5b3b..000000000

moviepy.video.fx.SlideOut#

class moviepy.video.fx.SlideOut.SlideOut(duration: float, side: str)[source]#

Makes the clip go away through one side of the screen.

Only works when the clip is included in a CompositeVideoClip, and if the clip has the same size as the whole composition.

Parameters:
• clip (moviepy.Clip.Clip) – A video clip.
• duration (float) – Time taken for the clip to fully disappear.
• side (str) – Side of the screen where the clip goes. One of 'top', 'bottom', 'left' or 'right'.

Examples

>>> from moviepy import *
>>>
>>> clips = [... make a list of clips]
>>> slided_clips = [
...     CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "left")])])
...     for clip in clips
... ]
>>> final_clip = concatenate_videoclips(slided_clips, padding=-1)
>>>
>>> clip = ColorClip(
...     color=(255, 0, 0), duration=1, size=(300, 300)
... ).with_fps(60)
>>> final_clip = CompositeVideoClip([clip.with_effects([vfx.SlideOut(1, "right")])])

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.

diff --git a/reference/reference/moviepy.video.fx.SuperSample.html b/reference/reference/moviepy.video.fx.SuperSample.html deleted file mode 100644 index 745c775cf..000000000

moviepy.video.fx.SuperSample#

class moviepy.video.fx.SuperSample.SuperSample(d: float, n_frames: int)[source]#

Replaces each frame at time t by the mean of n_frames equally spaced frames taken in the interval [t-d, t+d]. This results in motion blur.

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.
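Example (a sketch; the d and n_frames values are illustrative):

>>> from moviepy import VideoFileClip, vfx
>>> clip = VideoFileClip("myHolidays.mp4")  # hypothetical input file
>>> blurred = clip.with_effects([vfx.SuperSample(d=0.05, n_frames=5)])  # motion blur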

diff --git a/reference/reference/moviepy.video.fx.TimeMirror.html b/reference/reference/moviepy.video.fx.TimeMirror.html deleted file mode 100644 index bb3d6fb1b..000000000

moviepy.video.fx.TimeMirror#

class moviepy.video.fx.TimeMirror.TimeMirror[source]#

Returns a clip that plays the current clip backwards. The clip must have its duration attribute set. The same effect is applied to the clip's audio and mask if any.

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.

diff --git a/reference/reference/moviepy.video.fx.TimeSymmetrize.html b/reference/reference/moviepy.video.fx.TimeSymmetrize.html deleted file mode 100644 index 371b26943..000000000

moviepy.video.fx.TimeSymmetrize#

class moviepy.video.fx.TimeSymmetrize.TimeSymmetrize[source]#

Returns a clip that plays the current clip once forwards and then once backwards. This is very practical for making videos that loop well, e.g. to create animated GIFs. This effect is automatically applied to the clip's mask and audio if they exist.

apply(clip: Clip) → Clip[source]#
Apply the effect to the clip.

copy()#
Return a shallow copy of an Effect. As noted above, always copy an Effect before applying it.
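Example of building a looping GIF (a sketch; filenames are illustrative):

>>> from moviepy import VideoFileClip, vfx
>>> clip = VideoFileClip("myHolidays.mp4")  # hypothetical short input clip
>>> looping = clip.with_effects([vfx.TimeSymmetrize()])
>>> looping.write_gif("loop.gif")  # plays forwards then backwards, so it loops seamlessly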

diff --git a/reference/reference/moviepy.video.fx.html b/reference/reference/moviepy.video.fx.html deleted file mode 100644 index 150440b76..000000000

moviepy.video.fx#

All the visual effects that can be applied to VideoClip.

Modules

• moviepy.video.fx.AccelDecel([new_duration, ...]) – Accelerates and decelerates a clip, useful for GIF making.
• moviepy.video.fx.BlackAndWhite([RGB, ...]) – Desaturates the picture, makes it black and white.
• moviepy.video.fx.Blink(duration_on, duration_off) – Makes the clip blink.
• moviepy.video.fx.Crop([x1, y1, x2, y2, ...]) – Effect to crop a clip to get a new clip in which just a rectangular subregion of the original clip is conserved.
• moviepy.video.fx.CrossFadeIn(duration) – Makes the clip appear progressively, over duration seconds.
• moviepy.video.fx.CrossFadeOut(duration) – Makes the clip disappear progressively, over duration seconds.
• moviepy.video.fx.EvenSize() – Crops the clip to make dimensions even.
• moviepy.video.fx.FadeIn(duration[, ...]) – Makes the clip progressively appear from some color (black by default), over duration seconds at the beginning of the clip.
• moviepy.video.fx.FadeOut(duration[, final_color]) – Makes the clip progressively fade to some color (black by default), over duration seconds at the end of the clip.
• moviepy.video.fx.Freeze([t, ...]) – Momentarily freeze the clip at time t.
• moviepy.video.fx.FreezeRegion([t, region, ...]) – Freezes one region of the clip while the rest remains animated.
• moviepy.video.fx.GammaCorrection(gamma) – Gamma-correction of a video clip.
• moviepy.video.fx.HeadBlur(fx, fy, radius[, ...]) – Returns a filter that will blur a moving part (a head?) of the frames.
• moviepy.video.fx.InvertColors() – Returns the color-inversed clip.
• moviepy.video.fx.Loop([n, duration]) – Returns a clip that plays the current clip in an infinite loop.
• moviepy.video.fx.LumContrast([lum, ...]) – Luminosity-contrast correction of a clip.
• moviepy.video.fx.MakeLoopable(overlap_duration) – Makes the clip fade in progressively at its own end, this way it can be looped indefinitely.
• moviepy.video.fx.Margin([margin_size, left, ...]) – Draws an external margin all around the frame.
• moviepy.video.fx.MaskColor([color, ...]) – Returns a new clip with a mask for transparency where the original clip is of the given color.
• moviepy.video.fx.MasksAnd(other_clip) – Returns the logical 'and' (minimum pixel color values) between two masks.
• moviepy.video.fx.MasksOr(other_clip) – Returns the logical 'or' (maximum pixel color values) between two masks.
• moviepy.video.fx.MirrorX([apply_to]) – Flips the clip horizontally (and its mask too, by default).
• moviepy.video.fx.MirrorY([apply_to]) – Flips the clip vertically (and its mask too, by default).
• moviepy.video.fx.MultiplyColor(factor) – Multiplies the clip's colors by the given factor; can be used to decrease or increase the clip's brightness.
• moviepy.video.fx.MultiplySpeed([factor, ...]) – Returns a clip playing the current clip but at a speed multiplied by factor.
• moviepy.video.fx.Painting([saturation, black]) – Transforms any photo into some kind of painting.
• moviepy.video.fx.Resize([new_size, height, ...]) – Effect returning a video clip that is a resized version of the clip.
• moviepy.video.fx.Rotate(angle[, unit, ...]) – Rotates the specified clip by angle degrees (or radians) anticlockwise; if the angle is not a multiple of 90 (degrees), or center, translate, or bg_color is not None, there will be black borders.
• moviepy.video.fx.Scroll([w, h, x_speed, ...]) – Effect that scrolls a clip horizontally or vertically, e.g. to make end credits.
• moviepy.video.fx.SlideIn(duration, side) – Makes the clip arrive from one side of the screen.
• moviepy.video.fx.SlideOut(duration, side) – Makes the clip go away through one side of the screen.
• moviepy.video.fx.SuperSample(d, n_frames) – Replaces each frame at time t by the mean of n_frames equally spaced frames taken in the interval [t-d, t+d].
• moviepy.video.fx.TimeMirror() – Returns a clip that plays the current clip backwards.
• moviepy.video.fx.TimeSymmetrize() – Returns a clip that plays the current clip once forwards and then once backwards.

diff --git a/reference/reference/moviepy.video.html b/reference/reference/moviepy.video.html deleted file mode 100644 index bfc798c1d..000000000
diff --git a/reference/reference/moviepy.video.io.ImageSequenceClip.ImageSequenceClip.html b/reference/reference/moviepy.video.io.ImageSequenceClip.ImageSequenceClip.html deleted file mode 100644 index 1706fb3e1..000000000

moviepy.video.io.ImageSequenceClip.ImageSequenceClip#

class moviepy.video.io.ImageSequenceClip.ImageSequenceClip(sequence, fps=None, durations=None, with_mask=True, is_mask=False, load_images=False)[source]#

A VideoClip made from a series of images.

Parameters:
• sequence – Can be one of these:
  – The name of a folder (containing only pictures). The pictures will be considered in alphanumerical order.
  – A list of names of image files. In this case you can choose to load the pictures in memory.
  – A list of Numpy arrays representing images. In this last case, masks are not currently supported.
• fps – Number of picture frames to read per second. Alternatively, you can provide the duration of each image with durations (see below).
• durations – List of the duration of each picture.
• with_mask – Should the alpha layer of PNG images be considered as a mask?
• is_mask – Will this sequence of pictures be used as an animated mask.
• load_images – Specify that all images should be loaded into RAM. This is only interesting if you have a small number of images that will be used more than once.
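Example covering the three accepted forms of sequence (a sketch; paths and fps values are illustrative):

>>> import numpy as np
>>> from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
>>> clip1 = ImageSequenceClip("frames_folder/", fps=24)  # hypothetical folder of images
>>> clip2 = ImageSequenceClip(["a.png", "b.png"], durations=[1, 2])  # hypothetical files
>>> frames = [np.zeros((10, 10, 3), dtype="uint8") for _ in range(24)]
>>> clip3 = ImageSequenceClip(frames, fps=24)  # list of numpy arrays (no masks)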

diff --git a/reference/reference/moviepy.video.io.ImageSequenceClip.html b/reference/reference/moviepy.video.io.ImageSequenceClip.html deleted file mode 100644 index 36da8258f..000000000

moviepy.video.io.ImageSequenceClip#

Implements ImageSequenceClip, a class to create a video clip from a set of image files.

Classes

• ImageSequenceClip(sequence[, fps, ...]) – A VideoClip made from a series of images.

diff --git a/reference/reference/moviepy.video.io.VideoFileClip.VideoFileClip.html b/reference/reference/moviepy.video.io.VideoFileClip.VideoFileClip.html deleted file mode 100644 index fc099e90e..000000000

moviepy.video.io.VideoFileClip.VideoFileClip#

class moviepy.video.io.VideoFileClip.VideoFileClip(filename, decode_file=False, has_mask=False, audio=True, audio_buffersize=200000, target_resolution=None, resize_algorithm='bicubic', audio_fps=44100, audio_nbytes=2, fps_source='fps', pixel_format=None, is_mask=False)[source]#

A video clip originating from a movie file. For instance:

>>> clip = VideoFileClip("myHolidays.mp4")
>>> clip.close()
>>> with VideoFileClip("myMaskVideo.avi") as clip2:
>>>    pass  # Implicit close called by context manager.

Parameters:
• filename – The name of the video file, as a string or a path-like object. It can have any extension supported by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov, etc.
• has_mask – Set this to True if there is a mask included in the video file. Video files rarely contain masks, but some video codecs enable that. For instance if you have a MoviePy VideoClip with a mask you can save it to a video file with a mask. (See also VideoClip.write_videofile for more details.)
• audio – Set to False if the clip doesn't have any audio or if you do not wish to read the audio.
• target_resolution – Set to (desired_width, desired_height) to have ffmpeg resize the frames before returning them. This is much faster than streaming in high-res and then resizing. If either dimension is None, the frames are resized by keeping the existing aspect ratio.
• resize_algorithm – The algorithm used for resizing. Default: "bicubic"; other popular options include "bilinear" and "fast_bilinear". For more information, see https://ffmpeg.org/ffmpeg-scaler.html
• fps_source – The fps value to collect from the metadata. Set by default to 'fps', but can be set to 'tbr', which may be helpful if you are finding that it is reading the incorrect fps from the file.
• pixel_format – Optional: Pixel format for the video to read. If not specified, 'rgb24' will be used as the default format, unless has_mask is set to True, in which case 'rgba' will be used.
• is_mask – True if the clip is going to be used as a mask.

filename#
Name of the original video file.

fps#
Frames per second in the original file.

Read the docs for Clip() and VideoClip() for other, more generic, attributes.

Lifetime#

Note that this creates subprocesses and locks files. If you construct one of these instances, you must call close() afterwards, or the subresources will not be cleaned up until the process ends.

If copies are made, and close() is called on one, it may cause methods on the other copies to fail.

close()[source]#
Close the internal reader.

diff --git a/reference/reference/moviepy.video.io.VideoFileClip.html b/reference/reference/moviepy.video.io.VideoFileClip.html deleted file mode 100644 index 39addcd06..000000000

moviepy.video.io.VideoFileClip#

Implements VideoFileClip, a class for creating video clips from video files.

Classes

• VideoFileClip(filename[, decode_file, ...]) – A video clip originating from a movie file.

diff --git a/reference/reference/moviepy.video.io.display_in_notebook.HTML2.html b/reference/reference/moviepy.video.io.display_in_notebook.HTML2.html deleted file mode 100644 index 2e9ce8bee..000000000

moviepy.video.io.display_in_notebook.HTML2#

moviepy.video.io.display_in_notebook.HTML2(content)[source]#
diff --git a/reference/reference/moviepy.video.io.display_in_notebook.display_in_notebook.html b/reference/reference/moviepy.video.io.display_in_notebook.display_in_notebook.html deleted file mode 100644 index 2a02df0b6..000000000

moviepy.video.io.display_in_notebook.display_in_notebook#

moviepy.video.io.display_in_notebook.display_in_notebook(clip, filetype=None, maxduration=60, t=None, fps=None, rd_kwargs=None, center=True, **html_kwargs)[source]#

Displays clip content in a Jupyter Notebook.

Remarks: If your browser doesn't support HTML5, this should warn you. If nothing is displayed, maybe your file or filename is wrong. Important: the media will be physically embedded in the notebook.

Parameters:
• clip (moviepy.Clip.Clip) – Either the name of a file, or a clip to preview. The clip will actually be written to a file and embedded as if a filename was provided.
• filetype (str, optional) – One of "video", "image" or "audio". If None is given, it is determined based on the extension of filename, but this can fail.
• maxduration (float, optional) – An error will be raised if the clip's duration is more than the indicated value (in seconds), to avoid filling up the browser's cache and the RAM.
• t (float, optional) – If not None, only the frame at time t will be displayed in the notebook, instead of a video of the clip.
• fps (int, optional) – Allows you to specify an fps, as required for clips whose fps is unknown.
• rd_kwargs (dict, optional) – Keyword arguments for the rendering, like dict(fps=15, bitrate="50k"). Allows you to pass some options to the render process. You can, for example, disable the logger bar by passing dict(logger=None).
• center (bool, optional) – If true (default), the content will be wrapped in a <div align=middle> HTML container, so the content will be displayed at the center.
• kwargs – Allows you to give some options, like width=260, etc. When editing looping gifs, a good choice is loop=1, autoplay=1.

Examples

>>> from moviepy import *
>>> # later ...
>>> clip.display_in_notebook(width=360)
>>> clip.audio.display_in_notebook()

>>> clip.write_gif("test.gif")
>>> display_in_notebook('test.gif')

>>> clip.save_frame("first_frame.jpeg")
>>> display_in_notebook("first_frame.jpeg")
diff --git a/reference/reference/moviepy.video.io.display_in_notebook.html b/reference/reference/moviepy.video.io.display_in_notebook.html deleted file mode 100644 index 2e3718d4a..000000000

moviepy.video.io.display_in_notebook#

Implements display_in_notebook, a function to embed images/videos/audio in the Jupyter Notebook.

Functions

• HTML2(content)
• display_in_notebook(clip[, filetype, ...]) – Displays clip content in a Jupyter Notebook.
• html_embed(clip[, filetype, maxduration, ...]) – Returns HTML5 code embedding the clip.

diff --git a/reference/reference/moviepy.video.io.display_in_notebook.html_embed.html b/reference/reference/moviepy.video.io.display_in_notebook.html_embed.html deleted file mode 100644 index a9ab1f112..000000000

moviepy.video.io.display_in_notebook.html_embed#

moviepy.video.io.display_in_notebook.html_embed(clip, filetype=None, maxduration=60, rd_kwargs=None, center=True, **html_kwargs)[source]#

Returns HTML5 code embedding the clip.

Parameters:
• clip (moviepy.Clip.Clip) – Either a file name, or a clip to preview. Either an image, a sound or a video. Clips will actually be written to a file and embedded as if a filename was provided.
• filetype (str, optional) – One of 'video', 'image' or 'audio'. If None is given, it is determined based on the extension of filename, but this can fail.
• maxduration (float, optional) – An error will be raised if the clip's duration is more than the indicated value (in seconds), to avoid filling up the browser's cache and the RAM.
• rd_kwargs (dict, optional) – Keyword arguments for the rendering, like dict(fps=15, bitrate="50k"). Allows you to pass some options to the render process. You can, for example, disable the logger bar by passing dict(logger=None).
• center (bool, optional) – If true (default), the content will be wrapped in a <div align=middle> HTML container, so the content will be displayed at the center.
• html_kwargs – Allows you to give some options, like width=260, autoplay=True, loop=1, etc.

Examples

>>> from moviepy import *
>>> # later ...
>>> html_embed(clip, width=360)
>>> html_embed(clip.audio)

>>> clip.write_gif("test.gif")
>>> html_embed('test.gif')

>>> clip.save_frame("first_frame.jpeg")
>>> html_embed("first_frame.jpeg")
diff --git a/reference/reference/moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.html b/reference/reference/moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.html deleted file mode 100644 index 7c80fdaf5..000000000

moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader#

class moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader(filename, decode_file=True, print_infos=False, bufsize=None, pixel_format='rgb24', check_duration=True, target_resolution=None, resize_algo='bicubic', fps_source='fps')[source]#

Class for video byte-level reading with ffmpeg.

close(delete_lastread=True)[source]#
Closes the reader, terminating the process if it is still open.

get_frame(t)[source]#
Read a video frame of the file at time t.

Note for coders: getting an arbitrary frame in the video with ffmpeg can be painfully slow if some decoding has to be done. This function tries to avoid fetching arbitrary frames whenever possible, by moving between adjacent frames.

get_frame_number(t)[source]#
Helper method to return the frame number at time t.

initialize(start_time=0)[source]#
Opens the file and creates the pipe.

Sets self.pos to the appropriate value (1 if start_time == 0, because it pre-reads the first frame).

read_frame()[source]#
Reads the next frame from the file. Note that upon (re)initialization, the first frame will already have been read and stored in self.lastread.

skip_frames(n=1)[source]#
Reads and throws away n frames.
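A low-level reading sketch (MoviePy normally drives this class itself; the filename is hypothetical):

>>> from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
>>> reader = FFMPEG_VideoReader("myHolidays.mp4")  # hypothetical input file
>>> frame = reader.get_frame(1.5)  # frame at t=1.5s, as a numpy array
>>> nxt = reader.read_frame()      # sequential read of the next frame
>>> reader.close()                 # terminate the ffmpeg subprocess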

diff --git a/reference/reference/moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.html b/reference/reference/moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.html deleted file mode 100644 index cb5db24a2..000000000

moviepy.video.io.ffmpeg_reader.FFmpegInfosParser#

class moviepy.video.io.ffmpeg_reader.FFmpegInfosParser(infos, filename, fps_source='fps', check_duration=True, decode_file=False)[source]#

Finite-state parser for the file information printed by the ffmpeg -i command. It is designed to parse the output fast, in one loop. It iterates line by line over the output of the ffmpeg -i <filename> [-f null -] command, changing the internal state of the parser as it goes.

Parameters:
• filename – Name of the file parsed, only used to raise accurate error messages.
• infos – Information returned by FFmpeg.
• fps_source – Indicates what source data will be preferably used to retrieve fps data.
• check_duration – Enable or disable the parsing of the duration of the file. Useful to skip the duration check, for example, for images.
• decode_file – Indicates if the whole file has been decoded. The duration parsing strategy will differ depending on this argument.

parse()[source]#
Parses the information that FFmpeg prints to stderr when executing its binary for a file with the -i option, and returns a dictionary with all the data needed by MoviePy.

parse_audio_stream_data(line)[source]#
Parses data from a "Stream ... Audio" line.

parse_data_by_stream_type(stream_type, line)[source]#
Parses data from a "Stream ... {stream_type}" line.

parse_duration(line)[source]#
Parse the duration from the line that outputs the duration of the container.

parse_fps(line)[source]#
Parses the number of FPS from a line of the ffmpeg -i command output.

parse_metadata_field_value(line)[source]#
Returns a tuple with a metadata field-value pair given a ffmpeg -i command output line.

parse_tbr(line)[source]#
Parses the TBR value from a line of the ffmpeg -i command output.

parse_video_stream_data(line)[source]#
Parses data from a "Stream ... Video" line.

video_metadata_type_casting(field, value)[source]#
Cast needed video metadata fields to other types than the default str.
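A sketch of feeding the parser by hand (assuming ffmpeg is on the PATH; the filename and the shown result are illustrative, and MoviePy normally calls this through ffmpeg_parse_infos):

>>> import subprocess
>>> proc = subprocess.run(
...     ["ffmpeg", "-i", "video.mp4", "-f", "null", "-"],  # hypothetical input file
...     capture_output=True, text=True,
... )
>>> parser = FFmpegInfosParser(proc.stderr, "video.mp4")  # ffmpeg prints infos to stderr
>>> infos = parser.parse()
>>> infos["video_found"]
True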

diff --git a/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos.html b/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos.html deleted file mode 100644 index f5556c1ef..000000000

moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos#

moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos(filename, check_duration=True, fps_source='fps', decode_file=False, print_infos=False)[source]#

Get the information of a file using ffmpeg.

Returns a dictionary with the following fields:

• "duration"
• "metadata"
• "inputs"
• "video_found"
• "video_fps"
• "video_n_frames"
• "video_duration"
• "video_bitrate"
• "video_metadata"
• "audio_found"
• "audio_fps"
• "audio_bitrate"
• "audio_metadata"

Note that "video_duration" is slightly smaller than "duration" to avoid fetching the incomplete frames at the end, which raises an error.

Parameters:
• filename – Name of the file parsed, only used to raise accurate error messages.
• infos – Information returned by FFmpeg.
• fps_source – Indicates what source data will be preferably used to retrieve fps data.
• check_duration – Enable or disable the parsing of the duration of the file. Useful to skip the duration check, for example, for images.
• decode_file – Indicates if the whole file must be read to retrieve its duration. This is needed for some files in order to get the correct duration (see Zulko/moviepy#1222).

  • -
-
-
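A minimal usage sketch (the media file path is hypothetical; the printed values depend on the file):

>>> from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
>>>
>>> infos = ffmpeg_parse_infos("media/chaplin.mp4")
>>> print(infos["video_found"], infos["video_fps"], infos["duration"])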
diff --git a/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_read_image.html b/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_read_image.html deleted file mode 100644 index 0ce2dbe86..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_read_image.html +++ /dev/null
moviepy.video.io.ffmpeg_reader.ffmpeg_read_image#

moviepy.video.io.ffmpeg_reader.ffmpeg_read_image(filename, with_mask=True, pixel_format=None)[source]#

Read an image file (PNG, BMP, JPEG…).

Wraps FFMPEG_VideoReader to read just one image. Returns an ImageClip.

This function is not meant to be used directly in MoviePy. Use ImageClip instead to make clips out of image files.

Parameters:

  • filename – Name of the image file. Can be of any format supported by ffmpeg.

  • with_mask – If the image has a transparency layer, with_mask=True will save this layer as the mask of the returned ImageClip.

  • pixel_format – Optional: pixel format for the image to read. If not specified, 'rgb24' will be used as the default format unless with_mask is set to True, in which case 'rgba' will be used.
diff --git a/reference/reference/moviepy.video.io.ffmpeg_reader.html b/reference/reference/moviepy.video.io.ffmpeg_reader.html deleted file mode 100644 index 48310e5fe..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_reader.html +++ /dev/null
moviepy.video.io.ffmpeg_reader#

Implements all the functions to read a video or a picture using ffmpeg.

Classes

FFMPEG_VideoReader(filename[, decode_file, ...])
Class for video byte-level reading with ffmpeg.

FFmpegInfosParser(infos, filename[, ...])
Finite state ffmpeg -i command option file information parser.

Functions

ffmpeg_parse_infos(filename[, ...])
Get the information of a file using ffmpeg.

ffmpeg_read_image(filename[, with_mask, ...])
Read an image file (PNG, BMP, JPEG...).
diff --git a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio.html b/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio.html deleted file mode 100644 index 9a9af2a70..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio.html +++ /dev/null
moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio#

moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio(inputfile, outputfile, bitrate=3000, fps=44100, logger='bar')[source]#

Extract the sound from a video file and save it in outputfile.

Parameters:

  • inputfile (str) – The path to the file from which the audio will be extracted.

  • outputfile (str) – The path to the file in which the audio will be stored.

  • bitrate (int, optional) – Bitrate for the new audio file.

  • fps (int, optional) – Frame rate for the new audio file.
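A minimal usage sketch (file names are hypothetical):

>>> from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_audio
>>>
>>> ffmpeg_extract_audio("movie.mp4", "movie_audio.mp3")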
diff --git a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip.html b/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip.html deleted file mode 100644 index 1d61c6a31..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip.html +++ /dev/null
moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip#

moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip(inputfile, start_time, end_time, outputfile=None, logger='bar')[source]#

Makes a new video file playing the input video file between two times.

Parameters:

  • inputfile (str) – Path to the file from which the subclip will be extracted.

  • start_time (float) – Moment of the input clip that marks the start of the produced subclip.

  • end_time (float) – Moment of the input clip that marks the end of the produced subclip.

  • outputfile (str, optional) – Path to the output file. Defaults to <inputfile_name>SUB<start_time>_<end_time><ext>.
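A minimal usage sketch (file names and times are hypothetical):

>>> from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
>>>
>>> # extract the 10s-25s portion of movie.mp4
>>> ffmpeg_extract_subclip("movie.mp4", 10, 25, outputfile="movie_subclip.mp4")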
diff --git a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio.html b/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio.html deleted file mode 100644 index 9a1465214..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio.html +++ /dev/null
moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio#

moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio(videofile, audiofile, outputfile, video_codec='copy', audio_codec='copy', logger='bar')[source]#

Merges a video file and an audio file into one movie file.

Parameters:

  • videofile (str) – Path to the video file used in the merge.

  • audiofile (str) – Path to the audio file used in the merge.

  • outputfile (str) – Path to the output file.

  • video_codec (str, optional) – Video codec used by FFmpeg in the merge.

  • audio_codec (str, optional) – Audio codec used by FFmpeg in the merge.
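A minimal usage sketch (file names are hypothetical; the default stream-copy codecs avoid re-encoding):

>>> from moviepy.video.io.ffmpeg_tools import ffmpeg_merge_video_audio
>>>
>>> ffmpeg_merge_video_audio("video.mp4", "soundtrack.mp3", "movie.mp4")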
diff --git a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_resize.html b/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_resize.html deleted file mode 100644 index b0271040f..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_resize.html +++ /dev/null
moviepy.video.io.ffmpeg_tools.ffmpeg_resize#

moviepy.video.io.ffmpeg_tools.ffmpeg_resize(inputfile, outputfile, size, logger='bar')[source]#

Resizes a file to a new size and writes the result to another file.

Parameters:

  • inputfile (str) – Path to the file to be resized.

  • outputfile (str) – Path to the output file.

  • size (list or tuple) – New size in format [width, height] for the output file.
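A minimal usage sketch (file names and target size are hypothetical):

>>> from moviepy.video.io.ffmpeg_tools import ffmpeg_resize
>>>
>>> ffmpeg_resize("movie.mp4", "movie_640x360.mp4", [640, 360])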
diff --git a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video.html b/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video.html deleted file mode 100644 index 598526963..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video.html +++ /dev/null
moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video#

moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video(inputfile, outputfile=None, output_dir='', overwrite_file=True, logger='bar')[source]#

Stabilizes inputfile and writes the result to outputfile.

Parameters:

  • inputfile (str) – The name of the shaky video.

  • outputfile (str, optional) – The name of the new stabilized video. Defaults to appending '_stabilized' to the input file name.

  • output_dir (str, optional) – The directory to place the output video in. Defaults to the current working directory.

  • overwrite_file (bool, optional) – If outputfile already exists in output_dir, then overwrite outputfile. Defaults to True.
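A minimal usage sketch (the input file name is hypothetical):

>>> from moviepy.video.io.ffmpeg_tools import ffmpeg_stabilize_video
>>>
>>> # writes shaky_stabilized.mp4 in the current working directory
>>> ffmpeg_stabilize_video("shaky.mp4")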
diff --git a/reference/reference/moviepy.video.io.ffmpeg_tools.html b/reference/reference/moviepy.video.io.ffmpeg_tools.html deleted file mode 100644 index bdf94ff31..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_tools.html +++ /dev/null
moviepy.video.io.ffmpeg_tools#

Miscellaneous bindings to ffmpeg.

Functions

ffmpeg_extract_audio(inputfile, outputfile)
Extract the sound from a video file and save it in outputfile.

ffmpeg_extract_subclip(inputfile, ...[, ...])
Makes a new video file playing the input video file between two times.

ffmpeg_merge_video_audio(videofile, ...[, ...])
Merges a video file and an audio file into one movie file.

ffmpeg_resize(inputfile, outputfile, size[, ...])
Resizes a file to a new size and writes the result to another file.

ffmpeg_stabilize_video(inputfile[, ...])
Stabilizes inputfile and writes the result to outputfile.
diff --git a/reference/reference/moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter.html b/reference/reference/moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter.html deleted file mode 100644 index 17c450164..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter.html +++ /dev/null
moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter#

class moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter(filename, size, fps, codec='libx264', audiofile=None, preset='medium', bitrate=None, with_mask=False, logfile=None, threads=None, ffmpeg_params=None, pixel_format=None)[source]#

A class for FFMPEG-based video writing.

Parameters:

  • filename (str) – Any filename like "video.mp4" etc. but if you want to avoid complications it is recommended to use the generic extension ".avi" for all your videos.

  • size (tuple or list) – Size of the output video in pixels (width, height).

  • fps (int) – Frames per second in the output video file.

  • codec (str, optional) – FFMPEG codec. It seems that in terms of quality the hierarchy is 'rawvideo' = 'png' > 'mpeg4' > 'libx264'. 'png' manages the same lossless quality as 'rawvideo' but yields smaller files. Type ffmpeg -codecs in a terminal to get a list of accepted codecs.

    Note for the default 'libx264': by default the pixel format yuv420p is used. If the video dimensions are not both even (e.g. 720x405) another pixel format is used, and this can cause problems in some video readers.

  • audiofile (str, optional) – The name of an audio file that will be incorporated into the video.

  • preset (str, optional) – Sets the time that FFMPEG will take to compress the video. The slower, the better the compression rate. Possibilities are: "ultrafast", "superfast", "veryfast", "faster", "fast", "medium" (default), "slow", "slower", "veryslow", "placebo".

  • bitrate (str, optional) – Only relevant for codecs which accept a bitrate. "5000k" offers nice results in general.

  • with_mask (bool, optional) – Set to True if there is a mask in the video to be encoded.

  • pixel_format (str, optional) – Pixel format for the output video file. If not specified, "rgb24" will be used as the default format unless with_mask is set to True, in which case "rgba" will be used.

  • logfile (int, optional) – File descriptor for logging output. If not defined, subprocess.PIPE will be used. If defined with another value, the log level of the ffmpeg command will be "info"; otherwise "error".

  • threads (int, optional) – Number of threads used to write the output with ffmpeg.

  • ffmpeg_params (list, optional) – Additional parameters passed to the ffmpeg command.

close()[source]#

Closes the writer, terminating the subprocess if it is still alive.

write_frame(img_array)[source]#

Writes one frame to the file.
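A minimal sketch of driving the writer directly with synthetic frames (the output file name and frame data are arbitrary):

>>> import numpy as np
>>> from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter
>>>
>>> writer = FFMPEG_VideoWriter("noise.mp4", size=(320, 240), fps=24)
>>> for _ in range(48):  # two seconds of random frames
...     writer.write_frame(np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8))
>>> writer.close()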
diff --git a/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_image.html b/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_image.html deleted file mode 100644 index bed415bd7..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_image.html +++ /dev/null
moviepy.video.io.ffmpeg_writer.ffmpeg_write_image#

moviepy.video.io.ffmpeg_writer.ffmpeg_write_image(filename, image, logfile=False, pixel_format=None)[source]#

Writes an image (HxWx3 or HxWx4 numpy array) to a file, using ffmpeg.

Parameters:

  • filename (str) – Path to the output file.

  • image (np.ndarray) – Numpy array with the image data.

  • logfile (bool, optional) – Writes the ffmpeg output to a logging file (True) or not (False).

  • pixel_format (str, optional) – Pixel format for ffmpeg. If not defined, it will be discovered by checking whether the image data contains an alpha channel ("rgba") or not ("rgb24").
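A minimal sketch writing a synthetic HxWx3 array (the output file name is arbitrary):

>>> import numpy as np
>>> from moviepy.video.io.ffmpeg_writer import ffmpeg_write_image
>>>
>>> ramp = np.tile(np.arange(256, dtype=np.uint8), (64, 1))  # 64x256 gray ramp
>>> ffmpeg_write_image("ramp.png", np.dstack([ramp] * 3))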
diff --git a/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_video.html b/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_video.html deleted file mode 100644 index 8ffe86c27..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_video.html +++ /dev/null
moviepy.video.io.ffmpeg_writer.ffmpeg_write_video#

moviepy.video.io.ffmpeg_writer.ffmpeg_write_video(clip, filename, fps, codec='libx264', bitrate=None, preset='medium', write_logfile=False, audiofile=None, threads=None, ffmpeg_params=None, logger='bar', pixel_format=None)[source]#

Write the clip to a videofile. See VideoClip.write_videofile for details on the parameters.
diff --git a/reference/reference/moviepy.video.io.ffmpeg_writer.html b/reference/reference/moviepy.video.io.ffmpeg_writer.html deleted file mode 100644 index 60978fa7c..000000000 --- a/reference/reference/moviepy.video.io.ffmpeg_writer.html +++ /dev/null
moviepy.video.io.ffmpeg_writer#

In the long term this will implement several methods to make videos out of VideoClips.

Classes

FFMPEG_VideoWriter(filename, size, fps[, ...])
A class for FFMPEG-based video writing.

Functions

ffmpeg_write_image(filename, image[, ...])
Writes an image (HxWx3 or HxWx4 numpy array) to a file, using ffmpeg.

ffmpeg_write_video(clip, filename, fps[, ...])
Write the clip to a videofile.
diff --git a/reference/reference/moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer.html b/reference/reference/moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer.html deleted file mode 100644 index f8f6eea77..000000000 --- a/reference/reference/moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer.html +++ /dev/null
moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer#

class moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer(size, fps, pixel_format)[source]#

A class for FFPLAY-based video previewing.

Parameters:

  • size (tuple or list) – Size of the output video in pixels (width, height).

  • fps (int) – Frames per second in the output video file.

  • pixel_format (str) – Pixel format for the output video file: rgb24 for normal video, rgba for video with a mask.

close()[source]#

Closes the previewer, terminating the subprocess if it is still alive.

show_frame(img_array)[source]#

Sends one frame to the ffplay subprocess to be displayed.
diff --git a/reference/reference/moviepy.video.io.ffplay_previewer.ffplay_preview_video.html b/reference/reference/moviepy.video.io.ffplay_previewer.ffplay_preview_video.html deleted file mode 100644 index f5f4fed10..000000000 --- a/reference/reference/moviepy.video.io.ffplay_previewer.ffplay_preview_video.html +++ /dev/null
moviepy.video.io.ffplay_previewer.ffplay_preview_video#

moviepy.video.io.ffplay_previewer.ffplay_preview_video(clip, fps, pixel_format='rgb24', audio_flag=None, video_flag=None)[source]#

Preview the clip using ffplay. See VideoClip.preview for details on the parameters.

Parameters:

  • clip (VideoClip) – The clip to preview.

  • fps (int) – Number of frames per second in the displayed video.

  • pixel_format (str, optional) –

    Warning: this is not used anywhere in the code and should probably be removed. It is believed that pixel format rgb24 does not work properly for now, because it requires applying a mask on CompositeVideoClip, and those are believed to not be working.

    Pixel format for the output video file: rgb24 for normal video, rgba for video with a mask.

  • audio_flag (Thread.Event, optional) – A thread event that the video will wait for. If not provided, audio is ignored.

  • video_flag (Thread.Event, optional) – A thread event that the video will set after the first frame has been shown. If not provided, it is simply ignored.
diff --git a/reference/reference/moviepy.video.io.ffplay_previewer.html b/reference/reference/moviepy.video.io.ffplay_previewer.html deleted file mode 100644 index 455df4e4f..000000000 --- a/reference/reference/moviepy.video.io.ffplay_previewer.html +++ /dev/null
moviepy.video.io.ffplay_previewer#

Classes and methods for previewing video clips with ffplay.

Classes

FFPLAY_VideoPreviewer(size, fps, pixel_format)
A class for FFPLAY-based video previewing.

Functions

ffplay_preview_video(clip, fps[, ...])
Preview the clip using ffplay.
diff --git a/reference/reference/moviepy.video.io.gif_writers.html b/reference/reference/moviepy.video.io.gif_writers.html deleted file mode 100644 index dd846287b..000000000 --- a/reference/reference/moviepy.video.io.gif_writers.html +++ /dev/null
moviepy.video.io.gif_writers#

MoviePy video GIF writing.

Functions

write_gif_with_imageio(clip, filename[, ...])
Writes the GIF with the Python library ImageIO (calls FreeImage).
diff --git a/reference/reference/moviepy.video.io.gif_writers.write_gif_with_imageio.html b/reference/reference/moviepy.video.io.gif_writers.write_gif_with_imageio.html deleted file mode 100644 index 3fb86b5c0..000000000 --- a/reference/reference/moviepy.video.io.gif_writers.write_gif_with_imageio.html +++ /dev/null
moviepy.video.io.gif_writers.write_gif_with_imageio#

moviepy.video.io.gif_writers.write_gif_with_imageio(clip, filename, fps=None, loop=0, logger='bar')[source]#

Writes the GIF with the Python library ImageIO (calls FreeImage).
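A minimal usage sketch (the media path is hypothetical; with_subclip keeps the GIF short):

>>> from moviepy import VideoFileClip
>>> from moviepy.video.io.gif_writers import write_gif_with_imageio
>>>
>>> clip = VideoFileClip("media/chaplin.mp4").with_subclip(0, 2)
>>> write_gif_with_imageio(clip, "chaplin.gif", fps=10)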
diff --git a/reference/reference/moviepy.video.io.html b/reference/reference/moviepy.video.io.html deleted file mode 100644 index 02b88dbcf..000000000 --- a/reference/reference/moviepy.video.io.html +++ /dev/null
moviepy.video.io#

Classes and methods for reading, writing and previewing video files.

Modules

moviepy.video.io.ImageSequenceClip
Implements ImageSequenceClip, a class to create a video clip from a set of image files.

moviepy.video.io.VideoFileClip
Implements VideoFileClip, a class for creating video clips from video files.

moviepy.video.io.display_in_notebook
Implements display_in_notebook, a function to embed images/videos/audio in the Jupyter Notebook.

moviepy.video.io.ffmpeg_reader
Implements all the functions to read a video or a picture using ffmpeg.

moviepy.video.io.ffmpeg_tools
Miscellaneous bindings to ffmpeg.

moviepy.video.io.ffmpeg_writer
In the long term this will implement several methods to make videos out of VideoClips.

moviepy.video.io.ffplay_previewer
Classes and methods for previewing video clips with ffplay.

moviepy.video.io.gif_writers
MoviePy video GIF writing.
diff --git a/reference/reference/moviepy.video.tools.credits.CreditsClip.html b/reference/reference/moviepy.video.tools.credits.CreditsClip.html deleted file mode 100644 index cb7b919eb..000000000 --- a/reference/reference/moviepy.video.tools.credits.CreditsClip.html +++ /dev/null
moviepy.video.tools.credits.CreditsClip#

class moviepy.video.tools.credits.CreditsClip(creditfile, width, color='white', stroke_color='black', stroke_width=2, font='Impact-Normal', font_size=60, bg_color=None, gap=0)[source]#

Credits clip.

Parameters:

creditfile
A string or path-like object pointing to a text file whose content must be as follows:

# This is a comment
# The next line says : leave 4 blank lines
.blank 4

..Executive Story Editor
MARCEL DURAND

..Associate Producers
MARTIN MARCEL
DIDIER MARTIN

..Music Supervisor
JEAN DIDIER

width
Total width of the credits text in pixels.

gap
Horizontal gap in pixels between the jobs and the names.

color
Color of the text. See TextClip.list('color') for a list of acceptable names.

font
Name of the font to use. See TextClip.list('font') for the list of fonts you can use on your computer.

font_size
Size of the font to use.

stroke_color
Color of the stroke (= contour line) of the text. If None, there will be no stroke.

stroke_width
Width of the stroke, in pixels. Can be a float, like 1.5.

bg_color
Color of the background. If None, the background will be transparent.

Returns:
An ImageClip instance that looks like this and can be scrolled to make some credits:

Executive Story Editor    MARCEL DURAND
   Associate Producers    MARTIN MARCEL
                          DIDIER MARTIN
      Music Supervisor    JEAN DIDIER

Return type:
image
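A minimal usage sketch (the credits file path, width and gap are hypothetical):

>>> from moviepy.video.tools.credits import CreditsClip
>>>
>>> credits = CreditsClip("credits.txt", width=600, gap=100)
>>> # scroll the credits upwards over time, e.g. for later compositing
>>> scrolling = credits.with_position(lambda t: ("center", -50 * t))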
diff --git a/reference/reference/moviepy.video.tools.credits.html b/reference/reference/moviepy.video.tools.credits.html deleted file mode 100644 index 525fca244..000000000 --- a/reference/reference/moviepy.video.tools.credits.html +++ /dev/null
moviepy.video.tools.credits#

Contains different functions to make end and opening credits, even though it is difficult to fill everyone's needs in this matter.

Classes

CreditsClip(creditfile, width[, color, ...])
Credits clip.
diff --git a/reference/reference/moviepy.video.tools.cuts.FramesMatch.html b/reference/reference/moviepy.video.tools.cuts.FramesMatch.html deleted file mode 100644 index ddc52789d..000000000 --- a/reference/reference/moviepy.video.tools.cuts.FramesMatch.html +++ /dev/null
moviepy.video.tools.cuts.FramesMatch#

class moviepy.video.tools.cuts.FramesMatch(start_time, end_time, min_distance, max_distance)[source]#

Frames match inside a set of frames.

Parameters:

  • start_time (float) – Starting time.

  • end_time (float) – End time.

  • min_distance (float) – Lower bound on the distance between the first and last frames.

  • max_distance (float) – Upper bound on the distance between the first and last frames.
diff --git a/reference/reference/moviepy.video.tools.cuts.FramesMatches.html b/reference/reference/moviepy.video.tools.cuts.FramesMatches.html deleted file mode 100644 index 7d4a5d289..000000000 --- a/reference/reference/moviepy.video.tools.cuts.FramesMatches.html +++ /dev/null
moviepy.video.tools.cuts.FramesMatches#

class moviepy.video.tools.cuts.FramesMatches(lst)[source]#

Frames matches inside a set of frames.

You can instantiate it passing a list of FramesMatch objects or using the class methods load and from_clip.

Parameters:

lst (list) – Iterable of FramesMatch objects.

best(n=1, percent=None)[source]#

Returns a new FramesMatches object or a FramesMatch from the current class instance, depending on the given conditions.

By default returns the first FramesMatch that the current instance stores.

Parameters:

  • n (int, optional) – Number of matches to retrieve from the current FramesMatches object. Only has effect when percent=None.

  • percent (float, optional) – Percent of the current matches to retrieve.

Returns:
FramesMatch or FramesMatches – if the number of matches to retrieve is greater than 1, returns a FramesMatches object; otherwise a FramesMatch.

filter(condition)[source]#

Return a FramesMatches object obtained by filtering out the FramesMatch objects which do not satisfy a condition.

Parameters:

condition (func) – Function which takes a FramesMatch object as parameter and returns a bool.

Examples

>>> # Only keep the matches corresponding to (> 1 second) sequences.
>>> new_matches = matches.filter(lambda match: match.time_span > 1)

static from_clip(clip, distance_threshold, max_duration, fps=None, logger='bar')[source]#

Finds all the frames that look alike in a clip, for instance to make a looping GIF.

Parameters:

  • clip (moviepy.video.VideoClip.VideoClip) – A MoviePy video clip.

  • distance_threshold (float) – Distance above which a match is rejected.

  • max_duration (float) – Maximal duration (in seconds) between two matching frames.

  • fps (int, optional) – Frames per second (default will be clip.fps).

  • logger (str, optional) – Either "bar" for progress bar or None or any Proglog logger.

Returns:
All pairs of frames with end_time - start_time < max_duration and whose distance is under distance_threshold.

Return type:
FramesMatches

Examples

We find all matching frames in a given video and turn the best match with a duration of 1.5 seconds or more into a GIF:

>>> from moviepy import VideoFileClip
>>> from moviepy.video.tools.cuts import FramesMatches
>>>
>>> clip = VideoFileClip("foo.mp4").resize(width=200)
>>> matches = FramesMatches.from_clip(
...     clip, distance_threshold=10, max_duration=3,  # will take time
... )
>>> best = matches.filter(lambda m: m.time_span > 1.5).best()
>>> clip.with_subclip(best.start_time, best.end_time).write_gif("foo.gif")

static load(filename)[source]#

Load a FramesMatches object from a file.

Parameters:

filename (str) – Path to the file from which to load the FramesMatches object.

Examples

>>> matching_frames = FramesMatches.load("somefile")

save(filename)[source]#

Save a FramesMatches object to a file.

Parameters:

filename (str) – Path to the file in which the FramesMatches object data will be dumped.

select_scenes(match_threshold, min_time_span, nomatch_threshold=None, time_distance=0)[source]#

Select the scenes at which a video clip can be reproduced in the smoothest possible way, mainly oriented towards the creation of GIF images.

Parameters:

  • match_threshold (float) – Maximum distance possible between frames. The smaller, the better-looping the GIFs are.

  • min_time_span (float) – Minimum duration for a scene. Only matches with a duration longer than the value passed to this parameter will be extracted.

  • nomatch_threshold (float, optional) – Minimum distance possible between frames. If None, it is chosen equal to match_threshold.

  • time_distance (float, optional) – Minimum time offset possible between matches.

Returns:
A new FramesMatches instance with the selected scenes.

Examples

>>> from pprint import pprint
>>> from moviepy import *
>>> from moviepy.video.tools.cuts import FramesMatches
>>>
>>> ch_clip = VideoFileClip("media/chaplin.mp4").with_subclip(1, 4)
>>> mirror_and_clip = [ch_clip.with_effects([vfx.TimeMirror()]), ch_clip]
>>> clip = concatenate_videoclips(mirror_and_clip)
>>>
>>> result = FramesMatches.from_clip(clip, 10, 3).select_scenes(
...     1, 2, nomatch_threshold=0,
... )
>>> print(result)
[(1.0000, 4.0000, 0.0000, 0.0000),
 (1.1600, 3.8400, 0.0000, 0.0000),
 (1.2800, 3.7200, 0.0000, 0.0000),
 (1.4000, 3.6000, 0.0000, 0.0000)]

write_gifs(clip, gifs_dir, **kwargs)[source]#

Extract the matching frames represented by the instance from a clip and write them as GIFs in a directory, one GIF for each matching frame.

Parameters:

  • clip (video.VideoClip.VideoClip) – A video clip whose frame scenes you want to obtain as GIF images.

  • gifs_dir (str) – Directory in which the GIF images will be written.

  • kwargs – Passed as clip.write_gif optional arguments.

Examples

>>> import os
>>> from pprint import pprint
>>> from moviepy import *
>>> from moviepy.video.tools.cuts import FramesMatches
>>>
>>> ch_clip = VideoFileClip("media/chaplin.mp4").with_subclip(1, 4)
>>> clip = concatenate_videoclips([ch_clip.time_mirror(), ch_clip])
>>>
>>> result = FramesMatches.from_clip(clip, 10, 3).select_scenes(
...     1, 2, nomatch_threshold=0,
... )
>>>
>>> os.mkdir("foo")
>>> result.write_gifs(clip, "foo")
MoviePy - Building file foo/00000100_00000400.gif with imageio.
MoviePy - Building file foo/00000115_00000384.gif with imageio.
MoviePy - Building file foo/00000128_00000372.gif with imageio.
MoviePy - Building file foo/00000140_00000360.gif with imageio.
diff --git a/reference/reference/moviepy.video.tools.cuts.detect_scenes.html b/reference/reference/moviepy.video.tools.cuts.detect_scenes.html deleted file mode 100644 index 02562833a..000000000 --- a/reference/reference/moviepy.video.tools.cuts.detect_scenes.html +++ /dev/null
moviepy.video.tools.cuts.detect_scenes#

moviepy.video.tools.cuts.detect_scenes(clip=None, luminosities=None, luminosity_threshold=10, logger='bar', fps=None)[source]#

Detects scenes of a clip based on luminosity changes.

Note that for large clips this may take some time.

Returns:
A tuple (cuts, luminosities), where cuts is a series of cuts [(0, t1), (t1, t2), ..., (..., tf)] and luminosities are the luminosities computed for each frame of the clip.

Parameters:

  • clip (video.VideoClip.VideoClip, optional) – A video clip. Can be None if a list of luminosities is provided instead. If provided, the luminosity of each frame of the clip will be computed. If the clip has no 'fps' attribute, you must provide it.

  • luminosities (list, optional) – A list of luminosities, e.g. returned by detect_scenes in a previous run.

  • luminosity_threshold (float, optional) – Determines a threshold above which the 'luminosity jumps' will be considered as scene changes. A scene change is defined as a change between 2 consecutive frames that is larger than (avg * thr), where avg is the average of the absolute changes between consecutive frames.

  • logger (str, optional) – Either "bar" for progress bar or None or any Proglog logger.

  • fps (int, optional) – Frames per second value. Must be provided if you provide no clip or a clip without an fps attribute.
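A minimal usage sketch (the media path is hypothetical; the cut times depend on the file):

>>> from moviepy import VideoFileClip
>>> from moviepy.video.tools.cuts import detect_scenes
>>>
>>> clip = VideoFileClip("media/chaplin.mp4")
>>> cuts, luminosities = detect_scenes(clip, fps=10, logger=None)
>>> print(cuts[:3])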
diff --git a/reference/reference/moviepy.video.tools.cuts.find_video_period.html b/reference/reference/moviepy.video.tools.cuts.find_video_period.html deleted file mode 100644 index 3199daa1e..000000000 --- a/reference/reference/moviepy.video.tools.cuts.find_video_period.html +++ /dev/null
moviepy.video.tools.cuts.find_video_period#

moviepy.video.tools.cuts.find_video_period(clip, fps=None, start_time=0.3)[source]#

Find the period of a video based on frame correlation.

Parameters:

  • clip (moviepy.Clip.Clip) – Clip for which the video period will be computed.

  • fps (int, optional) – Number of frames per second used in computing the period. Higher values will produce more accurate periods, but the execution time will be longer.

  • start_time (float, optional) – First timeframe used to calculate the period of the clip.

Examples

>>> from moviepy import *
>>> from moviepy.video.tools.cuts import find_video_period
>>>
>>> clip = VideoFileClip("media/chaplin.mp4").with_subclip(0, 1).loop(2)
>>> round(find_video_period(clip, fps=80), 6)
1
diff --git a/reference/reference/moviepy.video.tools.cuts.html b/reference/reference/moviepy.video.tools.cuts.html deleted file mode 100644 index 86a8d61bc..000000000 --- a/reference/reference/moviepy.video.tools.cuts.html +++ /dev/null
moviepy.video.tools.cuts#

Contains everything that can help automate the cuts in MoviePy.

Classes

FramesMatch(start_time, end_time, ...)
Frames match inside a set of frames.

FramesMatches(lst)
Frames matches inside a set of frames.

Functions

detect_scenes([clip, luminosities, ...])
Detects scenes of a clip based on luminosity changes.

find_video_period(clip[, fps, start_time])
Find the period of a video based on frame correlation.
diff --git a/reference/reference/moviepy.video.tools.drawing.blit.html b/reference/reference/moviepy.video.tools.drawing.blit.html deleted file mode 100644 index 97e70f35c..000000000 --- a/reference/reference/moviepy.video.tools.drawing.blit.html +++ /dev/null
moviepy.video.tools.drawing.blit#

moviepy.video.tools.drawing.blit(im1, im2, pos=None, mask=None)[source]#

Blit an image over another.

Blits im1 on im2 at position pos=(x, y), using the mask if provided.
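A minimal sketch compositing two synthetic arrays (this assumes blit returns the composited image):

>>> import numpy as np
>>> from moviepy.video.tools.drawing import blit
>>>
>>> background = np.zeros((100, 100, 3), dtype=np.uint8)
>>> patch = 255 * np.ones((20, 20, 3), dtype=np.uint8)
>>> composed = blit(patch, background, pos=(40, 40))  # white square at (40, 40)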
diff --git a/reference/reference/moviepy.video.tools.drawing.circle.html b/reference/reference/moviepy.video.tools.drawing.circle.html deleted file mode 100644 index 76e103095..000000000 --- a/reference/reference/moviepy.video.tools.drawing.circle.html +++ /dev/null
moviepy.video.tools.drawing.circle#

moviepy.video.tools.drawing.circle(screensize, center, radius, color=1.0, bg_color=0, blur=1)[source]#

Draw an image with a circle.

Draws a circle of color color, on a background of color bg_color, on a screen of size screensize at the position center=(x, y), with a radius radius but slightly blurred on the border by blur pixels.

Parameters:

  • screensize (tuple or list) – Size of the canvas.

  • center (tuple or list) – Center of the circle.

  • radius (float) – Radius of the circle, in pixels.

  • bg_color (tuple or float, optional) – Color for the background of the canvas. As default, black.

  • blur (float, optional) – Blur for the border of the circle.

Examples

>>> from moviepy.video.tools.drawing import circle
>>>
>>> circle(
...     (5, 5),  # size
...     (2, 2),  # center
...     2,       # radius
... )
array([[0.        , 0.        , 0.        , 0.        , 0.        ],
       [0.        , 0.58578644, 1.        , 0.58578644, 0.        ],
       [0.        , 1.        , 1.        , 1.        , 0.        ],
       [0.        , 0.58578644, 1.        , 0.58578644, 0.        ],
       [0.        , 0.        , 0.        , 0.        , 0.        ]])
diff --git a/reference/reference/moviepy.video.tools.drawing.color_gradient.html b/reference/reference/moviepy.video.tools.drawing.color_gradient.html deleted file mode 100644 index 25d748763..000000000 --- a/reference/reference/moviepy.video.tools.drawing.color_gradient.html +++ /dev/null

moviepy.video.tools.drawing.color_gradient#

-
-
-moviepy.video.tools.drawing.color_gradient(size, p1, p2=None, vector=None, radius=None, color_1=0.0, color_2=1.0, shape='linear', offset=0)[source]#
-

Draw a linear, bilinear, or radial gradient.

-

The result is a picture of size size, whose color varies -gradually from color color_1 in position p1 to color color_2 -in position p2.

-

If it is a RGB picture the result must be transformed into -a ‘uint8’ array to be displayed normally:

-
-
Parameters:
-
    -
  • size (tuple or list) – Size (width, height) in pixels of the final image array.

  • -
  • p1 (tuple or list) – Position for the first coordinate of the gradient in pixels (x, y). -The color ‘before’ p1 is color_1 and it gradually changes in -the direction of p2 until it is color_2 when it reaches p2.

  • -
  • p2 (tuple or list, optional) –

    -
    Position for the second coordinate of the gradient in pixels (x, y).

    Coordinates (x, y) of the limit point for color_1 -and color_2.

    -
    -
    -

  • -
  • vector (tuple or list, optional) – A vector (x, y) in pixels that can be provided instead of p2. -p2 is then defined as (p1 + vector).

  • -
  • color_1 (tuple or list, optional) – Starting color for the gradient. As default, black. Either floats -between 0 and 1 (for gradients used in masks) or [R, G, B] arrays -(for colored gradients).

  • -
  • color_2 (tuple or list, optional) – Color for the second point in the gradient. As default, white. Either -floats between 0 and 1 (for gradients used in masks) or [R, G, B] -arrays (for colored gradients).

  • -
  • shape (str, optional) – Shape of the gradient. Can be either "linear", "bilinear" or -"circular". In a linear gradient the color varies in one direction, -from point p1 to point p2. In a bilinear gradient it also -varies symmetrically from p1 in the other direction. In a circular -gradient it goes from color_1 to color_2 in all directions.

  • -
  • radius (float, optional) – If shape="radial", the radius of the gradient is defined with the -parameter radius, in pixels.

  • -
  • offset (float, optional) – Real number between 0 and 1 indicating the fraction of the vector -at which the gradient actually starts. For instance if offset -is 0.9 in a gradient going from p1 to p2, then the gradient will -only occur near p2 (before that everything is of color color_1) -If the offset is 0.9 in a radial gradient, the gradient will -occur in the region located between 90% and 100% of the radius, -this creates a blurry disc of radius d(p1, p2).

  • -
-
-
Returns:
-

An Numpy array of dimensions (width, height, n_colors) of type float -representing the image of the gradient.

-
-
Return type:
-

image

-
-
-

Examples

-
>>> color_gradient((10, 1), (0, 0), p2=(10, 0))  # from white to black
-[[1.  0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]]
->>>
->>> color_gradient(  # from red to green
-...     (10, 1),  # size
-...     (0, 0),   # p1
-...     p2=(10, 0),
-...     color_1=(255, 0, 0),  # red
-...     color_2=(0, 255, 0),  # green
-... )
-[[[  0.  255.    0. ]
-  [ 25.5 229.5   0. ]
-  [ 51.  204.    0. ]
-  [ 76.5 178.5   0. ]
-  [102.  153.    0. ]
-  [127.5 127.5   0. ]
-  [153.  102.    0. ]
-  [178.5  76.5   0. ]
-  [204.   51.    0. ]
-  [229.5  25.5   0. ]]]
-
-
-
diff --git a/reference/reference/moviepy.video.tools.drawing.color_split.html b/reference/reference/moviepy.video.tools.drawing.color_split.html deleted file mode 100644 index 0fb265db0..000000000 --- a/reference/reference/moviepy.video.tools.drawing.color_split.html +++ /dev/null
moviepy.video.tools.drawing.color_split#

moviepy.video.tools.drawing.color_split(size, x=None, y=None, p1=None, p2=None, vector=None, color_1=0, color_2=1.0, gradient_width=0)[source]#

Make an image split in 2 colored regions.

Returns an array of size size divided in two regions, called 1 and 2 in what follows, which will have colors color_1 and color_2 respectively.

Parameters:

  • x (int, optional) – If provided, the image is split horizontally at x, the left region being region 1.

  • y (int, optional) – If provided, the image is split vertically at y, the top region being region 1.

  • p1 (tuple or list, optional) – Position (x1, y1) in pixels, where the numbers can be floats. Region 1 is defined as the whole region on the left when going from p1 to p2.

  • p2 (tuple or list, optional) – Position (x2, y2) in pixels, where the numbers can be floats. Region 1 is defined as the whole region on the left when going from p1 to p2.

  • vector (tuple or list, optional) – A vector (v1, v2) that can be provided instead of p2, where the numbers can be floats. Region 1 is then the region on the left when starting in position p1 and going in the direction given by vector.

  • gradient_width (float, optional) – If not zero, the split is not sharp, but gradual over a region of width gradient_width (in pixels). This is preferable in many situations (for instance for antialiasing).

Examples

>>> size = [200, 200]
>>>
>>> # an image with all pixels with x<50 =0, the others =1
>>> color_split(size, x=50, color_1=0, color_2=1)
>>>
>>> # an image with all pixels with x<50 red, the others green
>>> color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0])
>>>
>>> # an image split along an arbitrary line
>>> color_split(size, p1=[20, 50], p2=[25, 70], color_1=0, color_2=1)
diff --git a/reference/reference/moviepy.video.tools.drawing.html b/reference/reference/moviepy.video.tools.drawing.html deleted file mode 100644 index 654c6cf18..000000000 --- a/reference/reference/moviepy.video.tools.drawing.html +++ /dev/null
moviepy.video.tools.drawing#

Deals with making images (np arrays). It provides drawing methods that are difficult to do with the existing Python libraries.

Functions

blit(im1, im2[, pos, mask])
Blit an image over another.

circle(screensize, center, radius[, color, ...])
Draw an image with a circle.

color_gradient(size, p1[, p2, vector, ...])
Draw a linear, bilinear, or radial gradient.

color_split(size[, x, y, p1, p2, vector, ...])
Make an image split in 2 colored regions.
diff --git a/reference/reference/moviepy.video.tools.html b/reference/reference/moviepy.video.tools.html deleted file mode 100644 index a494d621f..000000000 --- a/reference/reference/moviepy.video.tools.html +++ /dev/null
moviepy.video.tools#

Modules

moviepy.video.tools.credits – Contains different functions to make end and opening credits, even though it is difficult to fill everyone's needs in this matter.
moviepy.video.tools.cuts – Contains everything that can help automate the cuts in MoviePy.
moviepy.video.tools.drawing – Deals with making images (np arrays).
moviepy.video.tools.interpolators – Classes for easy interpolation of trajectories and curves.
moviepy.video.tools.subtitles – Experimental module for subtitles support.
diff --git a/reference/reference/moviepy.video.tools.interpolators.Interpolator.html b/reference/reference/moviepy.video.tools.interpolators.Interpolator.html deleted file mode 100644 index 1cb868857..000000000 --- a/reference/reference/moviepy.video.tools.interpolators.Interpolator.html +++ /dev/null @@ -1,686 +0,0 @@

moviepy.video.tools.interpolators.Interpolator#

class moviepy.video.tools.interpolators.Interpolator(tt=None, ss=None, ttss=None, left=None, right=None)[source]#

Poor man's linear interpolator.

Parameters:

  • tt (list, optional) – List of time frames for the interpolator.

  • ss (list, optional) – List of values for the interpolator.

  • ttss (list, optional) – List of time frames and their corresponding values for the interpolator. This argument can be used instead of tt and ss to instantiate the interpolator using a single argument.

  • left (float, optional) – Value to return when t < tt[0].

  • right (float, optional) – Value to return when t > tt[-1].


Examples

>>> # instantiate using `tt` and `ss`
>>> interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5])
>>>
>>> # instantiate using `ttss`
>>> interpolator = Interpolator(ttss=[[0, 3], [1, 4], [2, 5]])  # [t, value]
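
Once built, the interpolator can be evaluated at a time t. A minimal sketch, assuming Interpolator instances are callable (they wrap numpy.interp) and that left/right apply outside the tt range:

>>> interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5], left=3, right=5)
>>> v = interpolator(0.5)       # linear interpolation between ss[0]=3 and ss[1]=4 -> 3.5
>>> v_right = interpolator(10)  # t > tt[-1], so `right` (5) is returned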
diff --git a/reference/reference/moviepy.video.tools.interpolators.Trajectory.html b/reference/reference/moviepy.video.tools.interpolators.Trajectory.html deleted file mode 100644 index 0660c7b8c..000000000 --- a/reference/reference/moviepy.video.tools.interpolators.Trajectory.html +++ /dev/null @@ -1,801 +0,0 @@

moviepy.video.tools.interpolators.Trajectory#

class moviepy.video.tools.interpolators.Trajectory(tt, xx, yy)[source]#

Trajectory composed of time frames and (x, y) pixel positions.


It’s designed as an interpolator, so you can get the position at a given time t. You can instantiate it from a file using the methods from_file and load_list.

Parameters:

  • tt (list or numpy.ndarray) – Time frames.

  • xx (list or numpy.ndarray) – X positions in the trajectory.

  • yy (list or numpy.ndarray) – Y positions in the trajectory.


Examples

>>> trajectory = Trajectory([0, .166, .333], [554, 474, 384], [100, 90, 91])
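
As with Interpolator, a trajectory can then be evaluated at a time t. A minimal sketch, assuming Trajectory instances are callable and return the interpolated (x, y) position:

>>> trajectory = Trajectory([0, .166, .333], [554, 474, 384], [100, 90, 91])
>>> pos = trajectory(0.1)  # interpolated (x, y) position at t=0.1 seconds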

addx(x)[source]#

Adds a value to the xx positions of the trajectory.

Parameters:
x (int) – Value added to xx in the trajectory.

Returns:
New instance with the new X positions included.

Return type:
Trajectory

addy(y)[source]#

Adds a value to the yy positions of the trajectory.

Parameters:
y (int) – Value added to yy in the trajectory.

Returns:
New instance with the new Y positions included.

Return type:
Trajectory

static from_file(filename)[source]#

Instantiates an object of Trajectory using a data text file.

Parameters:
filename (str) – Path to the location of the trajectory text file to load.

Returns:
New instance loaded from the text file.

Return type:
Trajectory

static load_list(filename)[source]#

Loads a list of trajectories from a data text file.

Parameters:
filename (str) – Path of the text file that stores the data of a set of trajectories.

Returns:
List of trajectories loaded from the file.

Return type:
list

static save_list(trajs, filename)[source]#

Saves a set of trajectories into a text file.

Parameters:

  • trajs (list) – List of trajectories to be saved.

  • filename (str) – Path of the text file that will store the trajectories data.

to_file(filename)[source]#

Saves the trajectory data in a text file.

Parameters:
filename (str) – Path to the location of the new trajectory text file.
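
A minimal save/load roundtrip sketch, assuming to_file and from_file share the same plain-text (t, x, y) format (the file name is hypothetical):

>>> traj = Trajectory([0, .166, .333], [554, 474, 384], [100, 90, 91])
>>> traj.to_file("trajectory.txt")                      # write the data as text
>>> same_traj = Trajectory.from_file("trajectory.txt")  # read it back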

txy(tms=False)[source]#

Returns all times with the X and Y values of each position.

Parameters:
tms (bool, optional) – If True, the times will be returned in milliseconds.

update_interpolators()[source]#

Updates the internal X and Y position interpolators for the instance.
diff --git a/reference/reference/moviepy.video.tools.interpolators.html b/reference/reference/moviepy.video.tools.interpolators.html deleted file mode 100644 index 06aa2af3f..000000000 --- a/reference/reference/moviepy.video.tools.interpolators.html +++ /dev/null @@ -1,661 +0,0 @@

moviepy.video.tools.interpolators#

Classes for easy interpolation of trajectories and curves.

Classes

Interpolator([tt, ss, ttss, left, right]) – Poor man's linear interpolator.
Trajectory(tt, xx, yy) – Trajectory composed of time frames and (x, y) pixels.
diff --git a/reference/reference/moviepy.video.tools.subtitles.SubtitlesClip.html b/reference/reference/moviepy.video.tools.subtitles.SubtitlesClip.html deleted file mode 100644 index 96a933a4a..000000000 --- a/reference/reference/moviepy.video.tools.subtitles.SubtitlesClip.html +++ /dev/null @@ -1,721 +0,0 @@

moviepy.video.tools.subtitles.SubtitlesClip#

class moviepy.video.tools.subtitles.SubtitlesClip(subtitles, font=None, make_textclip=None, encoding=None)[source]#

A Clip that serves as “subtitle track” in videos.

One particularity of this class is that the images of the subtitle texts are not generated beforehand, but only if needed.

Parameters:

  • subtitles – Either the name of a file as a string or path-like object, or a list.

  • font – Path to a font file to be used. Optional if make_textclip is provided.

  • make_textclip – A custom function to use for text clip generation. If None, a TextClip will be generated. The function must take a text as argument and return a VideoClip to be used as caption.

  • encoding – Optional, specifies srt file encoding. Any standard Python encoding is allowed (listed at https://docs.python.org/3.8/library/codecs.html#standard-encodings).


Examples

>>> from moviepy.video.tools.subtitles import SubtitlesClip
>>> from moviepy.video.io.VideoFileClip import VideoFileClip
>>> from moviepy.video.VideoClip import TextClip
>>> from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
>>> generator = lambda text: TextClip(text=text, font='Georgia-Regular',
...                                   font_size=24, color='white')
>>> sub = SubtitlesClip("subtitles.srt", make_textclip=generator, encoding='utf-8')
>>> myvideo = VideoFileClip("myvideo.avi")
>>> final = CompositeVideoClip([myvideo, sub])
>>> final.write_videofile("final.mp4", fps=myvideo.fps)

in_subclip(start_time=None, end_time=None)[source]#

Returns a sequence of [(t1, t2), text] covering all the given subclip from start_time to end_time. The first and last times will be cropped so as to be exactly start_time and end_time if possible.
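
For instance, a minimal sketch of pulling the entries that fall in a given time window, assuming `sub` was built as in the example above:

>>> entries = sub.in_subclip(start_time=10, end_time=20)
>>> for (t1, t2), text in entries:
...     print(t1, t2, text)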

match_expr(expr)[source]#

Matches a regular expression against the subtitles of the clip.

write_srt(filename)[source]#

Writes an .srt file with the content of the clip.
diff --git a/reference/reference/moviepy.video.tools.subtitles.file_to_subtitles.html b/reference/reference/moviepy.video.tools.subtitles.file_to_subtitles.html deleted file mode 100644 index 7df6a9a9d..000000000 --- a/reference/reference/moviepy.video.tools.subtitles.file_to_subtitles.html +++ /dev/null @@ -1,668 +0,0 @@

moviepy.video.tools.subtitles.file_to_subtitles#

moviepy.video.tools.subtitles.file_to_subtitles(filename, encoding=None)[source]#

Converts an .srt file into subtitles.

The returned list is of the form [((start_time, end_time), 'some text'), ...] and can be fed to SubtitlesClip.

Only works for the ‘.srt’ format for the moment.
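
A minimal usage sketch (the .srt file name is hypothetical, and `generator` is the text-clip generator from the SubtitlesClip example); the parsed list can be passed to SubtitlesClip in place of a file name:

>>> from moviepy.video.tools.subtitles import SubtitlesClip, file_to_subtitles
>>> subs = file_to_subtitles("subtitles.srt", encoding="utf-8")
>>> first = subs[0]  # ((start_time, end_time), 'some text') for the first entry
>>> sub_clip = SubtitlesClip(subs, make_textclip=generator)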
diff --git a/reference/reference/moviepy.video.tools.subtitles.html b/reference/reference/moviepy.video.tools.subtitles.html deleted file mode 100644 index 875c17512..000000000 --- a/reference/reference/moviepy.video.tools.subtitles.html +++ /dev/null @@ -1,666 +0,0 @@

moviepy.video.tools.subtitles#

Experimental module for subtitles support.

Classes

SubtitlesClip(subtitles[, font, ...]) – A Clip that serves as "subtitle track" in videos.

Functions

file_to_subtitles(filename[, encoding]) – Converts an .srt file into subtitles.
diff --git a/search.html b/search.html deleted file mode 100644 index adc2c6fbf..000000000 --- a/search.html +++ /dev/null @@ -1,444 +0,0 @@
- - -
- - \ No newline at end of file diff --git a/searchindex.js b/searchindex.js deleted file mode 100644 index 7c5b0b69d..000000000 --- a/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({"docnames": ["developer_guide/contribution_guidelines", "developer_guide/developers_install", "developer_guide/index", "getting_started/FAQ", "getting_started/docker", "getting_started/index", "getting_started/install", "getting_started/moviepy_10_minutes", "getting_started/quick_presentation", "getting_started/updating_to_v2", "index", "reference/index", "reference/reference/moviepy", "reference/reference/moviepy.Clip", "reference/reference/moviepy.Clip.Clip", "reference/reference/moviepy.Effect", "reference/reference/moviepy.audio", "reference/reference/moviepy.audio.AudioClip", "reference/reference/moviepy.audio.AudioClip.AudioArrayClip", "reference/reference/moviepy.audio.AudioClip.AudioClip", "reference/reference/moviepy.audio.AudioClip.CompositeAudioClip", "reference/reference/moviepy.audio.AudioClip.concatenate_audioclips", "reference/reference/moviepy.audio.fx", "reference/reference/moviepy.audio.fx.AudioDelay", "reference/reference/moviepy.audio.fx.AudioFadeIn", "reference/reference/moviepy.audio.fx.AudioFadeOut", "reference/reference/moviepy.audio.fx.AudioLoop", "reference/reference/moviepy.audio.fx.AudioNormalize", "reference/reference/moviepy.audio.fx.MultiplyStereoVolume", "reference/reference/moviepy.audio.fx.MultiplyVolume", "reference/reference/moviepy.audio.io", "reference/reference/moviepy.audio.io.AudioFileClip", "reference/reference/moviepy.audio.io.AudioFileClip.AudioFileClip", "reference/reference/moviepy.audio.io.ffmpeg_audiowriter", "reference/reference/moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter", "reference/reference/moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite", "reference/reference/moviepy.audio.io.ffplay_audiopreviewer", "reference/reference/moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer", "reference/reference/moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview", "reference/reference/moviepy.audio.io.readers", "reference/reference/moviepy.audio.io.readers.FFMPEG_AudioReader", "reference/reference/moviepy.audio.tools", "reference/reference/moviepy.audio.tools.cuts", "reference/reference/moviepy.audio.tools.cuts.find_audio_period", "reference/reference/moviepy.config", "reference/reference/moviepy.config.check", "reference/reference/moviepy.config.try_cmd", "reference/reference/moviepy.decorators", "reference/reference/moviepy.decorators.add_mask_if_none", "reference/reference/moviepy.decorators.apply_to_audio", "reference/reference/moviepy.decorators.apply_to_mask", "reference/reference/moviepy.decorators.audio_video_effect", "reference/reference/moviepy.decorators.convert_masks_to_RGB", "reference/reference/moviepy.decorators.convert_parameter_to_seconds", "reference/reference/moviepy.decorators.convert_path_to_string", "reference/reference/moviepy.decorators.outplace", "reference/reference/moviepy.decorators.preprocess_args", "reference/reference/moviepy.decorators.requires_duration", "reference/reference/moviepy.decorators.requires_fps", "reference/reference/moviepy.decorators.use_clip_fps_by_default", "reference/reference/moviepy.tools", "reference/reference/moviepy.tools.close_all_clips", "reference/reference/moviepy.tools.convert_to_seconds", "reference/reference/moviepy.tools.cross_platform_popen_params", "reference/reference/moviepy.tools.deprecated_version_of", "reference/reference/moviepy.tools.find_extension", 
"reference/reference/moviepy.tools.no_display_available", "reference/reference/moviepy.tools.subprocess_call", "reference/reference/moviepy.video", "reference/reference/moviepy.video.VideoClip", "reference/reference/moviepy.video.VideoClip.BitmapClip", "reference/reference/moviepy.video.VideoClip.ColorClip", "reference/reference/moviepy.video.VideoClip.DataVideoClip", "reference/reference/moviepy.video.VideoClip.ImageClip", "reference/reference/moviepy.video.VideoClip.TextClip", "reference/reference/moviepy.video.VideoClip.UpdatedVideoClip", "reference/reference/moviepy.video.VideoClip.VideoClip", "reference/reference/moviepy.video.compositing", "reference/reference/moviepy.video.compositing.CompositeVideoClip", "reference/reference/moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip", "reference/reference/moviepy.video.compositing.CompositeVideoClip.clips_array", "reference/reference/moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips", "reference/reference/moviepy.video.fx", "reference/reference/moviepy.video.fx.AccelDecel", "reference/reference/moviepy.video.fx.BlackAndWhite", "reference/reference/moviepy.video.fx.Blink", "reference/reference/moviepy.video.fx.Crop", "reference/reference/moviepy.video.fx.CrossFadeIn", "reference/reference/moviepy.video.fx.CrossFadeOut", "reference/reference/moviepy.video.fx.EvenSize", "reference/reference/moviepy.video.fx.FadeIn", "reference/reference/moviepy.video.fx.FadeOut", "reference/reference/moviepy.video.fx.Freeze", "reference/reference/moviepy.video.fx.FreezeRegion", "reference/reference/moviepy.video.fx.GammaCorrection", "reference/reference/moviepy.video.fx.HeadBlur", "reference/reference/moviepy.video.fx.InvertColors", "reference/reference/moviepy.video.fx.Loop", "reference/reference/moviepy.video.fx.LumContrast", "reference/reference/moviepy.video.fx.MakeLoopable", "reference/reference/moviepy.video.fx.Margin", "reference/reference/moviepy.video.fx.MaskColor", "reference/reference/moviepy.video.fx.MasksAnd", "reference/reference/moviepy.video.fx.MasksOr", "reference/reference/moviepy.video.fx.MirrorX", "reference/reference/moviepy.video.fx.MirrorY", "reference/reference/moviepy.video.fx.MultiplyColor", "reference/reference/moviepy.video.fx.MultiplySpeed", "reference/reference/moviepy.video.fx.Painting", "reference/reference/moviepy.video.fx.Resize", "reference/reference/moviepy.video.fx.Rotate", "reference/reference/moviepy.video.fx.Scroll", "reference/reference/moviepy.video.fx.SlideIn", "reference/reference/moviepy.video.fx.SlideOut", "reference/reference/moviepy.video.fx.SuperSample", "reference/reference/moviepy.video.fx.TimeMirror", "reference/reference/moviepy.video.fx.TimeSymmetrize", "reference/reference/moviepy.video.io", "reference/reference/moviepy.video.io.ImageSequenceClip", "reference/reference/moviepy.video.io.ImageSequenceClip.ImageSequenceClip", "reference/reference/moviepy.video.io.VideoFileClip", "reference/reference/moviepy.video.io.VideoFileClip.VideoFileClip", "reference/reference/moviepy.video.io.display_in_notebook", "reference/reference/moviepy.video.io.display_in_notebook.HTML2", "reference/reference/moviepy.video.io.display_in_notebook.display_in_notebook", "reference/reference/moviepy.video.io.display_in_notebook.html_embed", "reference/reference/moviepy.video.io.ffmpeg_reader", "reference/reference/moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader", "reference/reference/moviepy.video.io.ffmpeg_reader.FFmpegInfosParser", 
"reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos", "reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_read_image", "reference/reference/moviepy.video.io.ffmpeg_tools", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_resize", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video", "reference/reference/moviepy.video.io.ffmpeg_writer", "reference/reference/moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter", "reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_image", "reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_video", "reference/reference/moviepy.video.io.ffplay_previewer", "reference/reference/moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer", "reference/reference/moviepy.video.io.ffplay_previewer.ffplay_preview_video", "reference/reference/moviepy.video.io.gif_writers", "reference/reference/moviepy.video.io.gif_writers.write_gif_with_imageio", "reference/reference/moviepy.video.tools", "reference/reference/moviepy.video.tools.credits", "reference/reference/moviepy.video.tools.credits.CreditsClip", "reference/reference/moviepy.video.tools.cuts", "reference/reference/moviepy.video.tools.cuts.FramesMatch", "reference/reference/moviepy.video.tools.cuts.FramesMatches", "reference/reference/moviepy.video.tools.cuts.detect_scenes", "reference/reference/moviepy.video.tools.cuts.find_video_period", "reference/reference/moviepy.video.tools.drawing", "reference/reference/moviepy.video.tools.drawing.blit", "reference/reference/moviepy.video.tools.drawing.circle", "reference/reference/moviepy.video.tools.drawing.color_gradient", "reference/reference/moviepy.video.tools.drawing.color_split", "reference/reference/moviepy.video.tools.interpolators", "reference/reference/moviepy.video.tools.interpolators.Interpolator", "reference/reference/moviepy.video.tools.interpolators.Trajectory", "reference/reference/moviepy.video.tools.subtitles", "reference/reference/moviepy.video.tools.subtitles.SubtitlesClip", "reference/reference/moviepy.video.tools.subtitles.file_to_subtitles", "user_guide/compositing", "user_guide/create_effects", "user_guide/index", "user_guide/loading", "user_guide/modifying", "user_guide/rendering"], "filenames": ["developer_guide/contribution_guidelines.rst", "developer_guide/developers_install.rst", "developer_guide/index.rst", "getting_started/FAQ.rst", "getting_started/docker.rst", "getting_started/index.rst", "getting_started/install.rst", "getting_started/moviepy_10_minutes.rst", "getting_started/quick_presentation.rst", "getting_started/updating_to_v2.rst", "index.rst", "reference/index.rst", "reference/reference/moviepy.rst", "reference/reference/moviepy.Clip.rst", "reference/reference/moviepy.Clip.Clip.rst", "reference/reference/moviepy.Effect.rst", "reference/reference/moviepy.audio.rst", "reference/reference/moviepy.audio.AudioClip.rst", "reference/reference/moviepy.audio.AudioClip.AudioArrayClip.rst", "reference/reference/moviepy.audio.AudioClip.AudioClip.rst", "reference/reference/moviepy.audio.AudioClip.CompositeAudioClip.rst", "reference/reference/moviepy.audio.AudioClip.concatenate_audioclips.rst", "reference/reference/moviepy.audio.fx.rst", "reference/reference/moviepy.audio.fx.AudioDelay.rst", "reference/reference/moviepy.audio.fx.AudioFadeIn.rst", 
"reference/reference/moviepy.audio.fx.AudioFadeOut.rst", "reference/reference/moviepy.audio.fx.AudioLoop.rst", "reference/reference/moviepy.audio.fx.AudioNormalize.rst", "reference/reference/moviepy.audio.fx.MultiplyStereoVolume.rst", "reference/reference/moviepy.audio.fx.MultiplyVolume.rst", "reference/reference/moviepy.audio.io.rst", "reference/reference/moviepy.audio.io.AudioFileClip.rst", "reference/reference/moviepy.audio.io.AudioFileClip.AudioFileClip.rst", "reference/reference/moviepy.audio.io.ffmpeg_audiowriter.rst", "reference/reference/moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter.rst", "reference/reference/moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite.rst", "reference/reference/moviepy.audio.io.ffplay_audiopreviewer.rst", "reference/reference/moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer.rst", "reference/reference/moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview.rst", "reference/reference/moviepy.audio.io.readers.rst", "reference/reference/moviepy.audio.io.readers.FFMPEG_AudioReader.rst", "reference/reference/moviepy.audio.tools.rst", "reference/reference/moviepy.audio.tools.cuts.rst", "reference/reference/moviepy.audio.tools.cuts.find_audio_period.rst", "reference/reference/moviepy.config.rst", "reference/reference/moviepy.config.check.rst", "reference/reference/moviepy.config.try_cmd.rst", "reference/reference/moviepy.decorators.rst", "reference/reference/moviepy.decorators.add_mask_if_none.rst", "reference/reference/moviepy.decorators.apply_to_audio.rst", "reference/reference/moviepy.decorators.apply_to_mask.rst", "reference/reference/moviepy.decorators.audio_video_effect.rst", "reference/reference/moviepy.decorators.convert_masks_to_RGB.rst", "reference/reference/moviepy.decorators.convert_parameter_to_seconds.rst", "reference/reference/moviepy.decorators.convert_path_to_string.rst", "reference/reference/moviepy.decorators.outplace.rst", "reference/reference/moviepy.decorators.preprocess_args.rst", "reference/reference/moviepy.decorators.requires_duration.rst", "reference/reference/moviepy.decorators.requires_fps.rst", "reference/reference/moviepy.decorators.use_clip_fps_by_default.rst", "reference/reference/moviepy.tools.rst", "reference/reference/moviepy.tools.close_all_clips.rst", "reference/reference/moviepy.tools.convert_to_seconds.rst", "reference/reference/moviepy.tools.cross_platform_popen_params.rst", "reference/reference/moviepy.tools.deprecated_version_of.rst", "reference/reference/moviepy.tools.find_extension.rst", "reference/reference/moviepy.tools.no_display_available.rst", "reference/reference/moviepy.tools.subprocess_call.rst", "reference/reference/moviepy.video.rst", "reference/reference/moviepy.video.VideoClip.rst", "reference/reference/moviepy.video.VideoClip.BitmapClip.rst", "reference/reference/moviepy.video.VideoClip.ColorClip.rst", "reference/reference/moviepy.video.VideoClip.DataVideoClip.rst", "reference/reference/moviepy.video.VideoClip.ImageClip.rst", "reference/reference/moviepy.video.VideoClip.TextClip.rst", "reference/reference/moviepy.video.VideoClip.UpdatedVideoClip.rst", "reference/reference/moviepy.video.VideoClip.VideoClip.rst", "reference/reference/moviepy.video.compositing.rst", "reference/reference/moviepy.video.compositing.CompositeVideoClip.rst", "reference/reference/moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip.rst", "reference/reference/moviepy.video.compositing.CompositeVideoClip.clips_array.rst", 
"reference/reference/moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips.rst", "reference/reference/moviepy.video.fx.rst", "reference/reference/moviepy.video.fx.AccelDecel.rst", "reference/reference/moviepy.video.fx.BlackAndWhite.rst", "reference/reference/moviepy.video.fx.Blink.rst", "reference/reference/moviepy.video.fx.Crop.rst", "reference/reference/moviepy.video.fx.CrossFadeIn.rst", "reference/reference/moviepy.video.fx.CrossFadeOut.rst", "reference/reference/moviepy.video.fx.EvenSize.rst", "reference/reference/moviepy.video.fx.FadeIn.rst", "reference/reference/moviepy.video.fx.FadeOut.rst", "reference/reference/moviepy.video.fx.Freeze.rst", "reference/reference/moviepy.video.fx.FreezeRegion.rst", "reference/reference/moviepy.video.fx.GammaCorrection.rst", "reference/reference/moviepy.video.fx.HeadBlur.rst", "reference/reference/moviepy.video.fx.InvertColors.rst", "reference/reference/moviepy.video.fx.Loop.rst", "reference/reference/moviepy.video.fx.LumContrast.rst", "reference/reference/moviepy.video.fx.MakeLoopable.rst", "reference/reference/moviepy.video.fx.Margin.rst", "reference/reference/moviepy.video.fx.MaskColor.rst", "reference/reference/moviepy.video.fx.MasksAnd.rst", "reference/reference/moviepy.video.fx.MasksOr.rst", "reference/reference/moviepy.video.fx.MirrorX.rst", "reference/reference/moviepy.video.fx.MirrorY.rst", "reference/reference/moviepy.video.fx.MultiplyColor.rst", "reference/reference/moviepy.video.fx.MultiplySpeed.rst", "reference/reference/moviepy.video.fx.Painting.rst", "reference/reference/moviepy.video.fx.Resize.rst", "reference/reference/moviepy.video.fx.Rotate.rst", "reference/reference/moviepy.video.fx.Scroll.rst", "reference/reference/moviepy.video.fx.SlideIn.rst", "reference/reference/moviepy.video.fx.SlideOut.rst", "reference/reference/moviepy.video.fx.SuperSample.rst", "reference/reference/moviepy.video.fx.TimeMirror.rst", "reference/reference/moviepy.video.fx.TimeSymmetrize.rst", "reference/reference/moviepy.video.io.rst", "reference/reference/moviepy.video.io.ImageSequenceClip.rst", "reference/reference/moviepy.video.io.ImageSequenceClip.ImageSequenceClip.rst", "reference/reference/moviepy.video.io.VideoFileClip.rst", "reference/reference/moviepy.video.io.VideoFileClip.VideoFileClip.rst", "reference/reference/moviepy.video.io.display_in_notebook.rst", "reference/reference/moviepy.video.io.display_in_notebook.HTML2.rst", "reference/reference/moviepy.video.io.display_in_notebook.display_in_notebook.rst", "reference/reference/moviepy.video.io.display_in_notebook.html_embed.rst", "reference/reference/moviepy.video.io.ffmpeg_reader.rst", "reference/reference/moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.rst", "reference/reference/moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.rst", "reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos.rst", "reference/reference/moviepy.video.io.ffmpeg_reader.ffmpeg_read_image.rst", "reference/reference/moviepy.video.io.ffmpeg_tools.rst", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio.rst", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip.rst", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio.rst", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_resize.rst", "reference/reference/moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video.rst", "reference/reference/moviepy.video.io.ffmpeg_writer.rst", "reference/reference/moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter.rst", 
"reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_image.rst", "reference/reference/moviepy.video.io.ffmpeg_writer.ffmpeg_write_video.rst", "reference/reference/moviepy.video.io.ffplay_previewer.rst", "reference/reference/moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer.rst", "reference/reference/moviepy.video.io.ffplay_previewer.ffplay_preview_video.rst", "reference/reference/moviepy.video.io.gif_writers.rst", "reference/reference/moviepy.video.io.gif_writers.write_gif_with_imageio.rst", "reference/reference/moviepy.video.tools.rst", "reference/reference/moviepy.video.tools.credits.rst", "reference/reference/moviepy.video.tools.credits.CreditsClip.rst", "reference/reference/moviepy.video.tools.cuts.rst", "reference/reference/moviepy.video.tools.cuts.FramesMatch.rst", "reference/reference/moviepy.video.tools.cuts.FramesMatches.rst", "reference/reference/moviepy.video.tools.cuts.detect_scenes.rst", "reference/reference/moviepy.video.tools.cuts.find_video_period.rst", "reference/reference/moviepy.video.tools.drawing.rst", "reference/reference/moviepy.video.tools.drawing.blit.rst", "reference/reference/moviepy.video.tools.drawing.circle.rst", "reference/reference/moviepy.video.tools.drawing.color_gradient.rst", "reference/reference/moviepy.video.tools.drawing.color_split.rst", "reference/reference/moviepy.video.tools.interpolators.rst", "reference/reference/moviepy.video.tools.interpolators.Interpolator.rst", "reference/reference/moviepy.video.tools.interpolators.Trajectory.rst", "reference/reference/moviepy.video.tools.subtitles.rst", "reference/reference/moviepy.video.tools.subtitles.SubtitlesClip.rst", "reference/reference/moviepy.video.tools.subtitles.file_to_subtitles.rst", "user_guide/compositing.rst", "user_guide/create_effects.rst", "user_guide/index.rst", "user_guide/loading.rst", "user_guide/modifying.rst", "user_guide/rendering.rst"], "titles": ["MoviePy\u2019s Contribution Guidelines", "Installation for MoviePy developers", "The MoviePy Developers Guide", "FAQ and troubleshooting", "MoviePy Docker", "Getting started with MoviePy", "Installation", "MoviePy in 10 Minutes: Creating a Trailer from \u201cBig Buck Bunny\u201d", "Quick presentation", "Updating from v1.X to v2.X", "MoviePy documentation", "Api Reference", "moviepy", "moviepy.Clip", "moviepy.Clip.Clip", "moviepy.Effect", "moviepy.audio", "moviepy.audio.AudioClip", "moviepy.audio.AudioClip.AudioArrayClip", "moviepy.audio.AudioClip.AudioClip", "moviepy.audio.AudioClip.CompositeAudioClip", "moviepy.audio.AudioClip.concatenate_audioclips", "moviepy.audio.fx", "moviepy.audio.fx.AudioDelay", "moviepy.audio.fx.AudioFadeIn", "moviepy.audio.fx.AudioFadeOut", "moviepy.audio.fx.AudioLoop", "moviepy.audio.fx.AudioNormalize", "moviepy.audio.fx.MultiplyStereoVolume", "moviepy.audio.fx.MultiplyVolume", "moviepy.audio.io", "moviepy.audio.io.AudioFileClip", "moviepy.audio.io.AudioFileClip.AudioFileClip", "moviepy.audio.io.ffmpeg_audiowriter", "moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter", "moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite", "moviepy.audio.io.ffplay_audiopreviewer", "moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer", "moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview", "moviepy.audio.io.readers", "moviepy.audio.io.readers.FFMPEG_AudioReader", "moviepy.audio.tools", "moviepy.audio.tools.cuts", "moviepy.audio.tools.cuts.find_audio_period", "moviepy.config", "moviepy.config.check", "moviepy.config.try_cmd", "moviepy.decorators", "moviepy.decorators.add_mask_if_none", 
"moviepy.decorators.apply_to_audio", "moviepy.decorators.apply_to_mask", "moviepy.decorators.audio_video_effect", "moviepy.decorators.convert_masks_to_RGB", "moviepy.decorators.convert_parameter_to_seconds", "moviepy.decorators.convert_path_to_string", "moviepy.decorators.outplace", "moviepy.decorators.preprocess_args", "moviepy.decorators.requires_duration", "moviepy.decorators.requires_fps", "moviepy.decorators.use_clip_fps_by_default", "moviepy.tools", "moviepy.tools.close_all_clips", "moviepy.tools.convert_to_seconds", "moviepy.tools.cross_platform_popen_params", "moviepy.tools.deprecated_version_of", "moviepy.tools.find_extension", "moviepy.tools.no_display_available", "moviepy.tools.subprocess_call", "moviepy.video", "moviepy.video.VideoClip", "moviepy.video.VideoClip.BitmapClip", "moviepy.video.VideoClip.ColorClip", "moviepy.video.VideoClip.DataVideoClip", "moviepy.video.VideoClip.ImageClip", "moviepy.video.VideoClip.TextClip", "moviepy.video.VideoClip.UpdatedVideoClip", "moviepy.video.VideoClip.VideoClip", "moviepy.video.compositing", "moviepy.video.compositing.CompositeVideoClip", "moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip", "moviepy.video.compositing.CompositeVideoClip.clips_array", "moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips", "moviepy.video.fx", "moviepy.video.fx.AccelDecel", "moviepy.video.fx.BlackAndWhite", "moviepy.video.fx.Blink", "moviepy.video.fx.Crop", "moviepy.video.fx.CrossFadeIn", "moviepy.video.fx.CrossFadeOut", "moviepy.video.fx.EvenSize", "moviepy.video.fx.FadeIn", "moviepy.video.fx.FadeOut", "moviepy.video.fx.Freeze", "moviepy.video.fx.FreezeRegion", "moviepy.video.fx.GammaCorrection", "moviepy.video.fx.HeadBlur", "moviepy.video.fx.InvertColors", "moviepy.video.fx.Loop", "moviepy.video.fx.LumContrast", "moviepy.video.fx.MakeLoopable", "moviepy.video.fx.Margin", "moviepy.video.fx.MaskColor", "moviepy.video.fx.MasksAnd", "moviepy.video.fx.MasksOr", "moviepy.video.fx.MirrorX", "moviepy.video.fx.MirrorY", "moviepy.video.fx.MultiplyColor", "moviepy.video.fx.MultiplySpeed", "moviepy.video.fx.Painting", "moviepy.video.fx.Resize", "moviepy.video.fx.Rotate", "moviepy.video.fx.Scroll", "moviepy.video.fx.SlideIn", "moviepy.video.fx.SlideOut", "moviepy.video.fx.SuperSample", "moviepy.video.fx.TimeMirror", "moviepy.video.fx.TimeSymmetrize", "moviepy.video.io", "moviepy.video.io.ImageSequenceClip", "moviepy.video.io.ImageSequenceClip.ImageSequenceClip", "moviepy.video.io.VideoFileClip", "moviepy.video.io.VideoFileClip.VideoFileClip", "moviepy.video.io.display_in_notebook", "moviepy.video.io.display_in_notebook.HTML2", "moviepy.video.io.display_in_notebook.display_in_notebook", "moviepy.video.io.display_in_notebook.html_embed", "moviepy.video.io.ffmpeg_reader", "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader", "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser", "moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos", "moviepy.video.io.ffmpeg_reader.ffmpeg_read_image", "moviepy.video.io.ffmpeg_tools", "moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio", "moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip", "moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio", "moviepy.video.io.ffmpeg_tools.ffmpeg_resize", "moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video", "moviepy.video.io.ffmpeg_writer", "moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter", "moviepy.video.io.ffmpeg_writer.ffmpeg_write_image", "moviepy.video.io.ffmpeg_writer.ffmpeg_write_video", "moviepy.video.io.ffplay_previewer", 
"moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer", "moviepy.video.io.ffplay_previewer.ffplay_preview_video", "moviepy.video.io.gif_writers", "moviepy.video.io.gif_writers.write_gif_with_imageio", "moviepy.video.tools", "moviepy.video.tools.credits", "moviepy.video.tools.credits.CreditsClip", "moviepy.video.tools.cuts", "moviepy.video.tools.cuts.FramesMatch", "moviepy.video.tools.cuts.FramesMatches", "moviepy.video.tools.cuts.detect_scenes", "moviepy.video.tools.cuts.find_video_period", "moviepy.video.tools.drawing", "moviepy.video.tools.drawing.blit", "moviepy.video.tools.drawing.circle", "moviepy.video.tools.drawing.color_gradient", "moviepy.video.tools.drawing.color_split", "moviepy.video.tools.interpolators", "moviepy.video.tools.interpolators.Interpolator", "moviepy.video.tools.interpolators.Trajectory", "moviepy.video.tools.subtitles", "moviepy.video.tools.subtitles.SubtitlesClip", "moviepy.video.tools.subtitles.file_to_subtitles", "Compositing multiple clips", "Creating your own effects", "The MoviePy User Guide", "Loading resources as clips", "Modifying clips and apply effects", "Previewing and saving video clips"], "terms": {"keep": [0, 7, 14, 23, 121, 151, 165, 168, 169, 170], "messag": [0, 64, 128, 129], "issu": [0, 9, 10], "topic": 0, "point": [0, 7, 74, 108, 148, 157, 169, 170], "Be": 0, "awar": 0, "each": [0, 7, 9, 14, 23, 40, 70, 72, 73, 79, 80, 81, 85, 114, 119, 151, 152, 161, 165, 168, 169, 170], "comment": [0, 148], "trigger": [0, 7], "notif": 0, "which": [0, 3, 6, 7, 8, 9, 10, 14, 18, 19, 20, 21, 28, 34, 40, 51, 61, 64, 74, 76, 79, 80, 81, 86, 93, 101, 102, 103, 108, 121, 129, 132, 133, 138, 151, 152, 153, 157, 158, 165, 166, 168, 169, 170], "get": [0, 7, 10, 11, 14, 40, 86, 127, 129, 138, 161, 167], "sent": 0, "out": [0, 10, 25, 76, 130, 137, 141, 151, 169, 170], "number": [0, 19, 23, 32, 37, 38, 40, 72, 76, 83, 86, 90, 91, 97, 119, 127, 128, 138, 143, 151, 153, 157, 158, 168, 169, 170], "peopl": [0, 1, 2], "opinion": 0, "ar": [0, 6, 7, 8, 9, 13, 14, 19, 21, 38, 62, 72, 76, 79, 80, 81, 86, 96, 110, 119, 121, 138, 143, 151, 152, 154, 163, 165, 166, 168, 170], "ok": 0, "For": [0, 6, 7, 8, 9, 11, 14, 15, 23, 24, 25, 26, 27, 28, 29, 76, 79, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 157, 165, 166, 167, 168, 169, 170], "longer": [0, 9, 151, 153, 168], "more": [0, 7, 8, 9, 11, 19, 73, 76, 79, 119, 121, 124, 125, 151, 153, 166, 167, 168, 169, 170], "depth": [0, 7, 10], "discuss": [0, 9], "us": [0, 3, 6, 8, 9, 10, 14, 15, 19, 23, 24, 25, 26, 27, 28, 29, 31, 32, 34, 38, 40, 47, 51, 59, 60, 61, 64, 70, 71, 72, 73, 74, 76, 79, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 119, 120, 121, 126, 128, 129, 130, 134, 138, 139, 143, 148, 151, 153, 155, 157, 160, 161, 163, 165, 166, 167, 170], "gitter": 0, "If": [0, 3, 4, 6, 7, 9, 14, 15, 19, 20, 23, 24, 25, 26, 27, 28, 29, 32, 38, 40, 52, 61, 62, 70, 71, 74, 76, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 124, 125, 130, 136, 138, 139, 143, 148, 151, 152, 157, 158, 161, 163, 165, 166, 168, 169, 170], "lead": [0, 15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 
115, 116], "decis": 0, "like": [0, 3, 6, 7, 8, 9, 14, 19, 32, 34, 40, 66, 73, 74, 76, 80, 86, 109, 121, 124, 125, 138, 148, 163, 165, 168, 169, 170], "merg": [0, 134], "reject": [0, 151], "pleas": [0, 3, 7, 11, 14, 76, 168], "leav": [0, 6, 9, 148, 170], "relev": [0, 34, 138, 169], "document": [0, 11, 166, 168], "outcom": 0, "reason": [0, 3, 8, 169], "do": [0, 6, 7, 9, 14, 81, 121, 151, 154, 165, 166, 168, 170], "push": 0, "ani": [0, 6, 7, 8, 9, 14, 15, 19, 32, 34, 40, 62, 63, 73, 76, 81, 83, 86, 102, 103, 107, 108, 115, 121, 130, 138, 151, 152, 163, 165, 168, 170], "commit": 0, "chang": [0, 3, 4, 7, 14, 28, 76, 128, 152, 157, 168, 169, 170], "api": [0, 5, 7, 10, 167, 168], "without": [0, 7, 9, 19, 23, 63, 76, 81, 152, 168, 169, 170], "prior": 0, "fork": 0, "offici": 0, "repositori": [0, 3, 10], "your": [0, 3, 6, 7, 8, 9, 19, 38, 74, 76, 124, 138, 148, 165, 167, 168, 169], "own": [0, 7, 8, 9, 15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 167, 169, 170], "account": [0, 74, 169], "button": 0, "top": [0, 7, 14, 19, 38, 74, 76, 79, 80, 86, 100, 112, 113, 158, 165], "right": [0, 6, 7, 28, 74, 76, 86, 100, 106, 112, 113, 160, 169], "corner": [0, 76, 86, 110, 165], "interfac": [0, 78], "while": [0, 9, 15, 23, 24, 25, 26, 27, 28, 29, 75, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 165, 168, 169], "view": 0, "basi": 0, "clone": 0, "machin": [0, 7, 8, 76], "git": 0, "url_to_your_fork": 0, "you": [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 19, 23, 24, 25, 26, 27, 28, 29, 32, 38, 61, 64, 73, 74, 76, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 119, 121, 124, 125, 138, 148, 151, 152, 161, 165, 166, 168, 169, 170], "can": [0, 1, 4, 6, 7, 8, 9, 10, 12, 14, 15, 19, 22, 23, 24, 25, 26, 27, 28, 29, 40, 51, 60, 74, 75, 76, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 119, 121, 124, 125, 127, 130, 138, 148, 149, 151, 152, 157, 158, 160, 161, 164, 165, 166, 168, 169, 170], "appropri": [0, 7, 127], "url": 0, "ssh": 0, "http": [0, 121, 163], "base": [0, 7, 8, 14, 15, 17, 19, 69, 76, 79, 124, 125, 138, 142, 152, 153, 165, 166, 168], "green": [0, 96, 102, 103, 157, 158, 169], "locat": [0, 4, 6, 76, 157, 161], "look": [0, 5, 6, 76, 148, 151, 165, 166, 169, 170], "By": [0, 7, 15, 23, 24, 25, 26, 27, 28, 29, 74, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 151, 168, 169, 170], "default": [0, 3, 6, 7, 14, 19, 29, 37, 71, 73, 74, 76, 90, 91, 104, 105, 111, 121, 124, 125, 128, 130, 133, 136, 138, 151, 156, 157, 165, 166, 168, 169, 170], "refer": [0, 5, 7, 10, 167, 168], "remot": 0, "from": [0, 3, 5, 6, 8, 12, 14, 18, 19, 23, 26, 27, 28, 29, 32, 40, 61, 73, 74, 76, 90, 97, 112, 113, 118, 119, 121, 124, 125, 127, 128, 132, 133, 151, 153, 156, 157, 158, 161, 163, 165, 166, 167, 168, 169, 170], "i": [0, 1, 4, 6, 7, 9, 10, 11, 14, 18, 19, 20, 23, 24, 27, 28, 32, 34, 37, 38, 40, 48, 52, 59, 61, 62, 63, 64, 66, 70, 71, 72, 73, 74, 75, 76, 79, 80, 81, 83, 84, 86, 87, 88, 92, 95, 97, 101, 106, 107, 109, 110, 112, 113, 115, 116, 119, 121, 124, 125, 127, 128, 
129, 130, 138, 142, 143, 147, 148, 151, 152, 157, 158, 161, 163, 164, 165, 166, 167, 168, 169, 170], "e": [0, 14, 76, 79, 92, 97, 111, 116, 138, 152, 168, 169], "thi": [0, 1, 3, 5, 6, 7, 8, 9, 11, 14, 15, 19, 23, 28, 32, 40, 51, 63, 66, 73, 74, 76, 79, 86, 99, 100, 101, 114, 116, 119, 121, 124, 125, 127, 128, 129, 130, 137, 138, 141, 143, 147, 148, 151, 152, 157, 158, 160, 163, 165, 166, 168, 169, 170], "case": [0, 3, 6, 7, 8, 9, 14, 19, 51, 76, 101, 119, 168, 169], "origin": [0, 7, 8, 9, 10, 14, 15, 23, 24, 25, 26, 27, 28, 29, 32, 73, 74, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 165, 168, 169, 170], "enter": 0, "add": [0, 7, 8, 48, 63, 76, 100, 161, 165, 166], "second": [0, 6, 7, 8, 14, 18, 19, 23, 24, 25, 26, 29, 32, 34, 37, 40, 43, 53, 62, 72, 75, 76, 85, 87, 88, 90, 91, 92, 99, 119, 121, 124, 125, 138, 142, 143, 151, 152, 153, 157, 165, 168, 169], "alia": 0, "upstream": 0, "com": 0, "zulko": [0, 10, 129], "ssl": 0, "_or_": 0, "instal": [0, 2, 3, 4, 5, 7, 10, 76, 170], "librari": [0, 7, 8, 9, 10, 145, 154, 163, 168], "insid": [0, 4, 7, 139, 150, 151, 166], "virtual": 0, "environ": [0, 66], "all": [0, 8, 11, 13, 14, 15, 19, 20, 22, 38, 61, 73, 76, 77, 79, 80, 81, 82, 86, 96, 100, 119, 126, 128, 138, 151, 157, 158, 161, 163, 165, 166, 167, 168, 169], "depend": [0, 6, 7, 20, 61, 75, 83, 128, 165, 169, 170], "includ": [0, 7, 9, 14, 23, 32, 75, 76, 80, 87, 88, 112, 113, 121, 161, 168], "pip": [0, 1, 6, 7], "option": [0, 3, 6, 8, 9, 14, 19, 23, 29, 43, 61, 76, 100, 109, 110, 121, 124, 125, 128, 130, 132, 133, 134, 136, 138, 139, 143, 151, 152, 153, 156, 157, 158, 160, 161, 163, 165, 170], "doc": [0, 1, 7, 121, 163], "test": [0, 6, 19, 66, 70, 76, 124, 125], "lint": 0, "configur": [0, 6, 44, 168], "pre": [0, 9, 127], "hook": 0, "run": [0, 1, 6, 7, 8, 9, 52, 152, 168], "respect": [0, 7, 76, 158, 165, 168], "pep8": 0, "just": [0, 6, 7, 8, 19, 71, 76, 86, 130, 165, 166, 168], "amount": [0, 108], "try": [0, 6, 7, 9, 19, 38, 76, 168, 170], "write": [0, 6, 8, 9, 14, 19, 30, 33, 34, 35, 36, 74, 76, 81, 117, 135, 136, 138, 139, 140, 142, 144, 145, 151, 163, 165, 166, 168, 169, 170], "auto": [0, 6, 74], "veri": [0, 3, 7, 8, 76, 116, 165, 169, 170], "explicit": [0, 86, 168], "variabl": [0, 7, 53, 54, 56, 76], "name": [0, 7, 9, 19, 32, 34, 40, 64, 65, 74, 76, 119, 121, 124, 125, 128, 129, 130, 136, 138, 148, 163, 168, 169, 170], "introduc": [0, 9], "new": [0, 7, 8, 9, 10, 14, 15, 64, 75, 76, 83, 86, 100, 101, 109, 132, 133, 135, 136, 151, 161, 165, 168, 169], "function": [0, 7, 8, 10, 14, 17, 19, 23, 28, 33, 35, 36, 38, 40, 42, 44, 47, 49, 50, 51, 52, 56, 60, 63, 64, 72, 76, 78, 83, 109, 110, 122, 126, 127, 130, 131, 137, 141, 144, 147, 149, 151, 154, 162, 163, 165, 166, 168, 169, 170], "fix": [0, 7, 8, 74, 168, 170], "bug": [0, 7, 19, 76, 124, 125, 169], "docstr": [0, 64], "team": [0, 9], "adopt": 0, "check": [0, 6, 7, 9, 10, 128, 129, 139, 170], "black": [0, 1, 71, 74, 84, 90, 91, 96, 102, 108, 110, 148, 156, 157, 168], "flake8": 0, "isort": 0, "so": [0, 6, 7, 9, 12, 19, 27, 32, 74, 76, 80, 109, 124, 125, 161, 163, 165, 166, 168, 169, 170], "make": [0, 6, 7, 8, 9, 15, 19, 23, 24, 25, 26, 27, 28, 29, 75, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 130, 133, 137, 141, 147, 148, 151, 154, 158, 165, 168, 170], "sure": [0, 6, 7, 170], "ve": [0, 166], "master": 0, "branch": 0, 
"up": [0, 32, 76, 83, 121, 168, 170], "date": [0, 10], "repo": 0, "period": [0, 9, 43, 153], "fetch": [0, 127, 129], "never": [0, 168, 169], "directli": [0, 7, 8, 9, 12, 81, 130, 169, 170], "off": 0, "separ": [0, 28], "checkout": 0, "your_develop_branch": 0, "ideal": [0, 97], "given": [0, 19, 26, 34, 37, 40, 67, 73, 76, 79, 80, 101, 106, 124, 125, 128, 151, 158, 161, 163, 168, 169], "keyword": [0, 19, 76, 124, 125, 170], "what": [0, 7, 8, 19, 128, 129, 158, 165, 170], "work": [0, 3, 6, 7, 9, 10, 42, 62, 63, 66, 76, 85, 86, 87, 88, 112, 113, 136, 143, 164, 165, 166, 168, 169, 170], "prefix": [0, 169], "fix_": 0, "feature_": 0, "someth": [0, 8, 169], "similarli": 0, "descript": [0, 10], "most": [0, 2, 3, 7, 9, 79, 165, 168, 169, 170], "recent": [0, 3], "detail": [0, 7, 10, 11, 19, 76, 121, 140, 143, 168, 170], "explan": [0, 7, 10, 11], "last": [0, 7, 14, 23, 32, 119, 150, 163, 165, 169, 170], "It": [0, 3, 8, 10, 14, 66, 76, 83, 121, 138, 143, 154, 161, 165, 167, 168, 169, 170], "move": [0, 4, 40, 73, 76, 95, 127, 170], "updat": [0, 5, 7, 14, 40, 75, 161, 168], "have": [0, 3, 5, 6, 7, 8, 9, 10, 15, 19, 20, 23, 38, 75, 76, 79, 81, 101, 115, 119, 121, 127, 158, 165, 166, 167, 168, 169, 170], "other": [0, 7, 8, 9, 21, 76, 79, 80, 93, 121, 128, 157, 158, 165, 168, 169], "pr": 0, "befor": [0, 6, 7, 9, 15, 23, 24, 25, 26, 27, 28, 29, 32, 52, 56, 75, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 157, 165, 168, 169], "creat": [0, 5, 6, 8, 9, 32, 40, 49, 50, 63, 70, 74, 76, 80, 116, 118, 121, 127, 157, 165, 167, 168, 169, 170], "outdat": 0, "sens": [0, 9], "avoid": [0, 19, 40, 67, 76, 124, 125, 127, 129, 138], "fall": 0, "too": [0, 7, 9, 62, 90, 91, 104, 105, 111, 168, 170], "much": [0, 7, 19, 121], "behind": 0, "rebas": 0, "interv": [0, 23, 114], "sync": [0, 170], "per": [0, 14, 18, 19, 32, 34, 37, 40, 72, 76, 119, 121, 138, 142, 143, 151, 152, 153, 168, 170], "first": [0, 6, 8, 9, 14, 24, 32, 76, 79, 127, 143, 150, 151, 153, 157, 163, 165, 168, 169, 170], "haven": 0, "t": [0, 4, 6, 7, 14, 19, 20, 23, 38, 40, 75, 76, 79, 80, 81, 92, 93, 95, 109, 110, 114, 121, 124, 127, 160, 161, 165, 166, 168, 169, 170], "familiaris": 0, "yourself": [0, 7, 9, 165], "concept": [0, 7, 10, 11, 167], "finish": [0, 7, 168], "featur": 0, "mention": 0, "still": [0, 3, 19, 34, 37, 38, 40, 73, 76, 127, 138, 142, 165, 168, 169, 170], "progress": [0, 14, 19, 24, 25, 76, 87, 88, 90, 91, 99, 101, 151, 152, 166], "suit": 0, "over": [0, 8, 9, 14, 24, 25, 26, 79, 87, 88, 90, 91, 155, 158, 165, 168], "expos": 0, "problem": [0, 8, 19, 38, 138, 169, 170], "pytest": [0, 1, 4], "when": [0, 7, 8, 9, 14, 15, 19, 23, 24, 25, 26, 27, 28, 29, 63, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 124, 151, 157, 158, 160, 165, 166, 168, 169, 170], "now": [0, 7, 9, 143, 165, 168, 169, 170], "see": [0, 3, 6, 8, 9, 11, 14, 19, 32, 73, 74, 76, 79, 81, 90, 91, 119, 121, 129, 140, 143, 148, 158, 165, 166, 167, 168, 169, 170], "suggest": 0, "send": [0, 34, 37], "onc": [0, 1, 19, 38, 73, 116, 119, 168, 169, 170], "open": [0, 3, 10, 40, 63, 76, 127, 147], "present": [0, 5, 7, 167, 168], "templat": [0, 7], "ask": [0, 3, 9], "fill": [0, 40, 76, 80, 147, 165, 168], "encourag": [0, 7, 9, 166, 170], "addit": [0, 1, 7, 19, 76, 138, 166, 168, 169, 170], "inform": [0, 10, 121, 128, 129, 168], "help": [0, 3, 7, 121, 149, 170], "provid": [0, 7, 9, 
10, 14, 19, 59, 74, 76, 92, 93, 119, 124, 125, 143, 152, 154, 155, 157, 158, 163, 165, 168, 169, 170], "further": [0, 5], "context": [0, 61, 121, 168], "link": [0, 10], "On": [0, 6, 137, 141, 168], "an": [0, 7, 8, 10, 14, 15, 18, 19, 20, 23, 24, 25, 26, 27, 28, 29, 32, 34, 35, 37, 38, 40, 43, 51, 57, 58, 61, 71, 73, 74, 75, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 119, 124, 125, 127, 129, 130, 138, 139, 148, 155, 156, 157, 158, 160, 161, 163, 165, 168, 169, 170], "autom": [0, 8, 10, 149], "submiss": 0, "might": [0, 6, 166], "take": [0, 6, 9, 24, 25, 29, 74, 138, 151, 152, 163, 165, 166, 168, 169, 170], "few": [0, 6, 7, 8, 9, 165, 166, 168], "minut": [0, 5, 62, 167, 169], "complet": [0, 76, 81], "In": [0, 1, 3, 6, 7, 8, 9, 14, 15, 23, 24, 25, 26, 27, 28, 29, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 119, 157, 165, 166, 168, 169, 170], "next": [0, 7, 80, 127, 129, 148, 168, 169], "step": [0, 8, 75, 168, 169, 170], "maintain": [0, 9, 15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 166], "review": 0, "necessari": [0, 7], "readi": [0, 7, 63], "part": [1, 76, 79, 86, 95, 165, 166, 168], "onli": [1, 3, 7, 8, 9, 14, 19, 32, 34, 62, 63, 66, 76, 81, 85, 86, 87, 88, 93, 110, 112, 113, 119, 124, 128, 129, 138, 151, 157, 163, 164, 165, 166, 168, 170], "destin": 1, "who": [1, 81, 168], "want": [1, 2, 6, 7, 8, 10, 61, 73, 74, 76, 81, 108, 138, 151, 165, 166, 167, 168, 169, 170], "build": [1, 151, 168], "themself": 1, "contribut": [1, 2], "normal": [1, 6, 24, 27, 76, 142, 143, 148, 157], "user": [1, 5, 7, 9, 10, 11, 76], "dont": [1, 6, 168, 169, 170], "need": [1, 2, 5, 6, 7, 9, 12, 75, 128, 129, 147, 163, 165, 166, 168, 169, 170], "main": [1, 7, 9, 17, 69, 78, 166, 167, 168, 169], "also": [1, 3, 4, 7, 8, 9, 14, 51, 76, 101, 121, 157, 165, 166, 168, 169, 170], "abl": [1, 166, 168, 169, 170], "requir": [1, 9, 19, 66, 75, 76, 80, 124, 143, 168, 169, 170], "sudo": [1, 6], "python": [1, 4, 6, 7, 8, 10, 80, 145, 154, 163, 168], "setup": [1, 6], "py": [1, 4, 7, 8], "build_doc": 1, "m": [1, 4, 7, 151], "And": [1, 7, 8, 168, 170], "cover": [2, 7, 163, 167], "thing": [2, 7, 9, 12, 64, 166, 169, 170], "particip": 2, "know": [2, 7, 169, 170], "": [2, 3, 7, 8, 10, 14, 19, 37, 40, 62, 73, 75, 76, 106, 107, 115, 116, 124, 125, 160, 161, 165, 168, 169, 170], "guidelin": [2, 10], "section": [3, 5, 7, 8, 167, 168], "intend": [3, 170], "answer": [3, 9], "question": 3, "These": [3, 14, 19, 76, 168], "consid": [3, 76, 119, 152, 165], "solv": [3, 170], "don": [3, 6, 7, 76, 81, 165, 169, 170], "report": 3, "github": [3, 9], "dedic": [3, 7], "forum": 3, "reddit": 3, "known": [3, 7, 165, 169, 170], "one": [3, 4, 7, 8, 9, 19, 21, 29, 32, 71, 72, 75, 76, 79, 80, 81, 93, 107, 109, 112, 113, 119, 121, 128, 130, 134, 138, 142, 151, 157, 165, 167, 168, 169], "dimens": [3, 81, 89, 121, 138, 157, 165, 168], "were": [3, 7, 9], "even": [3, 7, 8, 89, 138, 147, 168], "instanc": [3, 14, 19, 32, 38, 73, 76, 79, 121, 148, 151, 157, 158, 161, 168, 169, 170], "720x405": [3, 138], "mpeg4": [3, 76, 138, 170], "codec": [3, 19, 34, 35, 65, 76, 121, 134, 138, 140, 163, 170], "libx264": [3, 76, 138, 140], "format": [3, 7, 62, 76, 121, 130, 135, 138, 139, 142, 143, 164, 168, 170], "readabl": 
3, "some": [3, 7, 9, 14, 15, 19, 23, 24, 25, 26, 27, 28, 29, 40, 64, 74, 75, 76, 80, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 124, 125, 127, 129, 138, 148, 152, 164, 165, 166, 168, 169, 170], "reader": [3, 32, 76, 121, 127, 138], "vlc": 3, "deprec": [3, 64], "version": [3, 6, 8, 9, 10, 76, 93, 109, 166, 169], "ffmpeg": [3, 6, 7, 8, 32, 33, 34, 36, 39, 40, 45, 76, 121, 126, 127, 128, 129, 130, 131, 134, 138, 139, 170], "websit": 3, "o": [3, 6, 46, 151, 170], "mean": [3, 7, 9, 19, 81, 114, 165, 168, 169, 170], "comput": [3, 6, 7, 14, 19, 20, 38, 73, 74, 76, 79, 92, 107, 109, 148, 152, 153, 168, 169, 170], "good": [3, 7, 19, 76, 124, 165, 167, 170], "enough": [3, 19, 38, 40, 110, 165, 166, 170], "render": [3, 8, 19, 20, 75, 76, 124, 125, 167, 168, 169, 170], "clip": [3, 15, 17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 29, 31, 32, 35, 37, 38, 43, 48, 49, 50, 51, 52, 55, 57, 58, 59, 61, 64, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 118, 120, 121, 124, 125, 130, 133, 140, 143, 145, 148, 151, 152, 153, 163, 166, 167], "real": [3, 7, 76, 157, 165, 168, 170], "time": [3, 8, 9, 14, 15, 19, 20, 23, 24, 25, 26, 27, 28, 29, 40, 62, 73, 75, 76, 79, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 124, 127, 133, 138, 150, 151, 152, 153, 160, 161, 163, 166, 168, 170], "hesit": [3, 7, 170], "plai": [3, 7, 14, 18, 19, 20, 21, 26, 37, 76, 79, 81, 97, 107, 115, 116, 133, 165, 168, 169, 170], "lower": [3, 7, 8, 19, 38, 76, 79, 83, 86, 150, 170], "fp": [3, 7, 14, 18, 19, 23, 32, 35, 38, 40, 58, 59, 70, 72, 76, 79, 81, 119, 121, 124, 125, 127, 128, 129, 132, 138, 140, 142, 143, 145, 151, 152, 153, 163, 168, 170], "sound": [3, 14, 18, 19, 24, 25, 32, 34, 37, 38, 40, 76, 125, 132, 168, 169, 170], "11000": [3, 170], "hz": [3, 19, 168, 170], "fine": [3, 19, 37, 38, 170], "downsiz": [3, 165, 170], "resiz": [3, 7, 9, 14, 76, 81, 121, 135, 151, 165, 168, 169, 170], "mac": [4, 10], "window": [4, 6, 10, 63, 76, 168], "linux": [4, 6, 10, 66], "etc": [4, 7, 8, 14, 19, 34, 40, 73, 76, 96, 121, 124, 125, 138, 168, 170], "root": 4, "dir": [4, 168, 170], "dockerfil": 4, "f": [4, 128, 168], "contain": [4, 7, 10, 14, 19, 61, 76, 119, 121, 124, 125, 128, 139, 147, 149, 165], "follow": [4, 7, 8, 9, 14, 61, 75, 81, 83, 148, 158, 165, 168], "command": [4, 7, 46, 67, 128, 138, 170], "w": [4, 76, 111, 166, 169], "directori": [4, 6, 7, 76, 136, 151, 168], "where": [4, 14, 25, 40, 72, 75, 76, 79, 80, 83, 90, 91, 101, 112, 113, 152, 158, 165, 168, 169], "alreadi": [4, 7, 14, 19, 127, 136, 165, 166, 168, 169, 170], "connect": 4, "exec": 4, "myscript": 4, "isn": [4, 7, 76], "bash": 4, "start": [4, 7, 9, 10, 11, 14, 20, 23, 86, 111, 133, 150, 157, 158, 167, 169, 170], "v": [4, 96], "pwd": 4, "code": [4, 9, 125, 143, 166, 167, 168], "explain": [5, 8, 168], "everyth": [5, 7, 8, 9, 12, 16, 68, 149, 157, 168, 169, 170], "edit": [5, 7, 8, 9, 10, 14, 19, 41, 76, 124, 165, 167, 168, 169, 170], "To": [5, 6, 7, 8, 10, 86, 165, 166, 168, 169, 170], "go": [5, 7, 8, 76, 121, 157, 158, 166, 168, 170], "The": [5, 6, 7, 9, 10, 11, 14, 15, 19, 32, 38, 40, 64, 73, 74, 76, 79, 81, 83, 95, 96, 102, 103, 107, 109, 111, 115, 119, 121, 124, 128, 132, 136, 138, 143, 148, 151, 157, 163, 
164, 165, 166, 168, 169, 170], "guid": [5, 7, 10, 11], "quick": [5, 7, 167, 170], "10": [5, 8, 23, 76, 86, 151, 152, 157, 165, 166, 167, 168, 169, 170], "trailer": [5, 167], "big": [5, 8, 167, 170], "buck": [5, 8, 167, 170], "bunni": [5, 8, 167, 170], "docker": 5, "v1": [5, 158], "x": [5, 76, 86, 111, 155, 156, 157, 158, 161, 165, 168], "v2": [5, 158], "faq": 5, "troubleshoot": 5, "done": [6, 7, 8, 40, 127, 165, 168], "how": [6, 9, 10, 24, 25, 83, 166, 167, 168, 170], "With": [6, 7, 9, 92, 170], "type": [6, 14, 61, 62, 64, 76, 128, 138, 148, 151, 152, 157, 161, 166, 168, 170], "termin": [6, 34, 37, 40, 127, 138, 142], "softwar": [6, 8, 10], "video": [6, 8, 9, 10, 14, 19, 24, 27, 29, 34, 38, 40, 51, 61, 65, 166, 167, 169], "read": [6, 7, 8, 14, 30, 32, 39, 40, 76, 117, 119, 121, 126, 127, 129, 130, 168], "ffplai": [6, 7, 8, 19, 37, 142, 143, 170], "preview": [6, 8, 9, 19, 30, 37, 38, 66, 76, 117, 124, 125, 142, 143, 167, 168, 169], "worri": [6, 165, 169], "about": [6, 7, 9, 16, 68, 165, 169], "should": [6, 7, 9, 14, 19, 40, 64, 76, 79, 83, 86, 97, 108, 119, 124, 143, 165, 168, 169, 170], "automat": [6, 7, 14, 20, 79, 80, 81, 92, 107, 109, 116, 165, 168, 169], "download": [6, 7], "imageio": [6, 8, 76, 145, 151], "dure": [6, 7, 9, 19, 26, 29, 38, 76, 81], "plan": 6, "audio": [6, 7, 8, 14, 49, 51, 61, 65, 73, 76, 107, 115, 116, 121, 122, 124, 125, 128, 132, 134, 138, 143, 166, 169, 170], "though": [6, 8, 9, 147, 166, 169, 170], "usual": [6, 168, 169], "found": [6, 45], "alongsid": 6, "access": [6, 7, 166, 169, 170], "set": [6, 7, 9, 14, 15, 19, 20, 23, 24, 25, 26, 27, 28, 29, 61, 67, 71, 73, 74, 76, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 118, 121, 127, 130, 138, 143, 150, 151, 161, 165, 168, 170], "below": [6, 14, 76, 119, 158], "specif": [6, 7, 167, 170], "There": [6, 81, 168, 170], "coupl": [6, 76], "allow": [6, 7, 8, 9, 14, 19, 76, 84, 124, 125, 163, 166, 168], "extern": [6, 100], "tool": [6, 8, 9, 10], "easiest": 6, "wai": [6, 8, 14, 15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 151, 165, 166, 169, 170], "import": [6, 8, 12, 14, 19, 23, 26, 27, 28, 29, 76, 112, 113, 124, 125, 151, 153, 156, 163, 165, 166, 168, 169, 170], "object": [6, 7, 8, 9, 13, 14, 15, 19, 23, 24, 25, 26, 27, 28, 29, 32, 61, 73, 74, 75, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 148, 151, 161, 163, 166, 168, 169], "exampl": [6, 7, 14, 15, 19, 23, 24, 25, 26, 27, 28, 29, 32, 64, 73, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 124, 125, 128, 129, 151, 153, 156, 157, 158, 160, 161, 163, 165, 166, 167, 168, 169, 170], "ffmpeg_binari": 6, "ffplay_binari": 6, "altern": [6, 76], "after": [6, 7, 21, 32, 40, 81, 143, 165, 169], "env": 6, "file": [6, 8, 9, 19, 31, 32, 34, 35, 40, 65, 73, 74, 76, 81, 117, 118, 119, 120, 121, 124, 125, 127, 128, 129, 130, 132, 133, 134, 135, 136, 138, 139, 142, 143, 148, 151, 161, 163, 164, 168, 169], "2": [6, 14, 19, 23, 29, 32, 34, 37, 38, 40, 43, 62, 76, 110, 121, 148, 151, 152, 153, 156, 157, 158, 160, 165, 168, 169, 170], "avail": [6, 7, 8, 9, 168, 170], "its": [6, 7, 9, 14, 17, 24, 69, 99, 104, 105, 115, 
165, 168], "alwai": [6, 7, 15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 165, 168, 169], "detect": [6, 8, 152], "whatev": 6, "gener": [6, 8, 9, 14, 19, 38, 73, 74, 75, 76, 83, 121, 138, 163, 165, 168, 169], "maco": 6, "ex": 6, "lastli": [6, 170], "disk": 6, "specifi": [6, 7, 14, 19, 40, 53, 54, 70, 76, 81, 92, 97, 101, 110, 119, 121, 124, 130, 138, 163, 165], "exact": 6, "r": [6, 7, 76, 157, 168, 170], "c": [6, 7, 19, 168], "program": [6, 7, 9, 10, 19, 44, 60, 169], "consol": 6, "config": [6, 7], "tutori": [7, 10, 167], "aim": [7, 9], "simpl": [7, 8, 9, 165, 166, 169], "short": [7, 167], "introduct": [7, 10, 11], "wish": [7, 121, 168, 169, 170], "explor": 7, "seen": [7, 9, 166, 168], "learn": [7, 8], "basic": [7, 8, 168], "As": [7, 8, 76, 81, 156, 157, 165, 170], "project": [7, 9], "we": [7, 9, 15, 23, 24, 25, 26, 27, 28, 29, 66, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 143, 151, 165, 166, 168, 169, 170], "movi": [7, 8, 76, 121, 134], "browser": [7, 19, 76, 124, 125, 170], "doe": [7, 14, 19, 24, 25, 64, 73, 76, 83, 143, 169, 170], "support": [7, 10, 19, 32, 46, 61, 76, 119, 121, 124, 130, 162, 168], "html5": [7, 19, 76, 124, 125, 170], "mp4": [7, 8, 14, 24, 25, 26, 27, 34, 40, 76, 121, 138, 151, 153, 163, 165, 168, 169], "gather": 7, "resourc": [7, 8, 14, 79, 167], "font": [7, 8, 9, 74, 148, 163, 168], "imag": [7, 8, 9, 14, 19, 61, 69, 73, 74, 76, 108, 109, 110, 118, 119, 122, 124, 125, 128, 129, 130, 139, 148, 151, 154, 155, 156, 157, 158, 163, 166, 168, 169], "easi": [7, 8, 14, 159], "prepar": 7, "unzip": 7, "folder": [7, 8, 119, 168], "familiar": 7, "script": [7, 8, 74], "proce": 7, "let": [7, 8, 166, 168, 169, 170], "modul": [7, 9, 12, 16, 22, 30, 41, 68, 77, 82, 117, 146, 162, 169], "final_clip": [7, 112, 113, 165, 170], "write_videofil": [7, 8, 14, 76, 121, 140, 163, 165, 168, 170], "result": [7, 8, 9, 14, 15, 23, 24, 25, 26, 27, 28, 29, 73, 76, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 135, 136, 138, 151, 157, 165, 168, 169, 170], "numpi": [7, 8, 14, 18, 19, 40, 73, 76, 108, 119, 139, 157, 161, 168, 169], "np": [7, 19, 23, 102, 103, 139, 154, 168], "videofileclip": [7, 8, 9, 14, 24, 25, 26, 27, 69, 76, 151, 153, 163, 165, 169, 170], "bbb": 7, "realli": [7, 8, 9, 14, 166, 168, 170], "limit": [7, 9, 157, 166], "handl": [7, 9], "custom": [7, 8, 9, 67, 163, 165, 166, 168, 169, 170], "anim": [7, 8, 69, 72, 73, 76, 93, 116, 119], "No": 7, "matter": [7, 76, 147], "kind": [7, 9, 15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 166, 168], "ultim": 7, "either": [7, 8, 14, 19, 26, 32, 40, 74, 76, 93, 109, 110, 121, 124, 125, 151, 152, 157, 163, 168, 169, 170], "videoclip": [7, 8, 13, 14, 19, 22, 23, 26, 27, 38, 51, 79, 82, 110, 119, 121, 137, 140, 141, 143, 151, 152, 163, 166, 169], "visual": [7, 8, 82, 168, 169], "element": [7, 8, 72, 167], "audioclip": [7, 8, 13, 14, 22, 23, 28, 34, 35, 37, 38, 43, 51, 76, 166, 169, 170], "those": [7, 8, 143, 165, 166, 168, 169], "find": [7, 9, 11, 43, 121, 151, 153, 168, 170], "exhaust": 7, "list": [7, 9, 14, 19, 20, 21, 70, 72, 74, 76, 79, 81, 90, 91, 104, 
105, 112, 113, 119, 135, 138, 142, 148, 151, 152, 156, 157, 158, 160, 161, 163, 164, 168, 169], "focu": 7, "charact": 7, "classic": [7, 169], "task": [7, 8, 166, 167, 170], "turn": [7, 8, 151, 168], "multipl": [7, 8, 9, 14, 15, 23, 24, 25, 26, 27, 28, 29, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 167, 168, 170], "subclip": [7, 8, 14, 76, 133, 163], "intro_clip": 7, "with_subclip": [7, 8, 14, 76, 151, 153, 165, 168, 169, 170], "bird_clip": 7, "16": [7, 19, 76], "20": [7, 8, 158, 169], "bunny_clip": 7, "37": 7, "55": 7, "rodents_clip": 7, "00": [7, 8, 24, 25, 168, 169, 170], "03": [7, 14, 76, 169], "34": 7, "75": 7, "56": 7, "string": [7, 14, 19, 32, 34, 54, 61, 73, 74, 76, 121, 148, 163, 168, 169], "notat": [7, 74], "hh": 7, "mm": 7, "ss": [7, 160], "u": [7, 8], "rambo_clip": 7, "04": 7, "41": 7, "44": 7, "70": [7, 8, 76, 158, 168], "here": [7, 8, 62, 165, 166, 168, 170], "method": [7, 10, 13, 14, 30, 40, 64, 73, 74, 76, 81, 93, 117, 121, 127, 137, 141, 151, 154, 161, 165, 166, 168, 170], "end": [7, 8, 9, 14, 20, 25, 29, 32, 76, 91, 92, 99, 111, 121, 129, 133, 147, 150, 165, 166, 168, 169], "\u00b5": 7, "store": [7, 32, 76, 127, 132, 151, 161, 169], "often": 7, "essenti": [7, 8], "ensur": [7, 15, 23, 24, 25, 26, 27, 28, 29, 40, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "thei": [7, 8, 9, 10, 14, 76, 81, 116, 165, 166, 168, 169], "meet": [7, 19, 38], "vision": [7, 14], "watch": 7, "segment": [7, 9], "re": [7, 121, 127, 166, 170], "adjust": [7, 14], "perfect": [7, 76, 166], "util": [7, 42], "complementari": 7, "audio_preview": 7, "note": [7, 19, 23, 32, 40, 76, 81, 121, 127, 129, 138, 152, 165, 168, 169, 170], "python3": 7, "binari": [7, 10, 45, 128, 170], "warn": [7, 19, 76, 124, 143], "low": [7, 168], "slow": [7, 40, 76, 109, 127, 138], "down": [7, 34, 37, 76, 83, 169, 170], "mai": [7, 8, 9, 19, 20, 38, 73, 76, 121, 152, 166, 169, 170], "notic": 7, "becaus": [7, 9, 15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 127, 143, 166, 168, 169], "track": [7, 8, 9, 163, 165, 168], "encount": 7, "shift": [7, 9], "due": [7, 9], "fact": [7, 76, 165], "cannot": [7, 8, 76, 166, 168, 170], "cours": [7, 8, 169], "action": [7, 169], "paramet": [7, 10, 14, 15, 18, 19, 20, 21, 23, 24, 25, 29, 32, 34, 37, 38, 40, 43, 61, 65, 71, 72, 73, 74, 75, 76, 79, 81, 83, 84, 93, 97, 99, 100, 102, 103, 109, 110, 111, 112, 113, 119, 121, 124, 125, 128, 129, 130, 132, 133, 134, 135, 136, 138, 139, 140, 142, 143, 148, 150, 151, 152, 153, 156, 157, 158, 160, 161, 163, 165, 166, 168, 169, 170], "valu": [7, 14, 19, 23, 29, 40, 43, 61, 75, 76, 83, 84, 96, 100, 102, 103, 110, 121, 124, 125, 127, 128, 138, 151, 152, 153, 160, 161], "easier": [7, 9], "rodent": 7, "bit": [7, 19, 32, 76, 168], "long": [7, 9, 24, 25, 137, 141, 165, 168, 170], "remov": [7, 66, 76, 86, 143, 169], "would": [7, 19, 76, 165, 169], "nice": [7, 9, 76, 138, 165, 166], "quit": [7, 9, 168, 170], "common": [7, 8, 13, 169, 170], "with_cutout": [7, 14, 165], "portion": [7, 32, 40, 170], "between": [7, 14, 19, 23, 40, 71, 76, 81, 90, 91, 101, 102, 103, 127, 133, 148, 150, 151, 152, 157, 168, 169], "06": [7, 24, 25], "modif": [7, 166, 168], "well": [7, 9, 10, 76, 116, 165, 167], "mani": [7, 8, 60, 76, 158, 
165, 168, 169, 170], "manipul": [7, 8, 9, 10, 16, 68], "with_": [7, 9], "start_tim": [7, 14, 29, 40, 76, 127, 133, 150, 151, 153, 163, 164], "end_tim": [7, 14, 29, 76, 133, 150, 151, 163, 164], "reassign": 7, "return": [7, 9, 14, 15, 19, 20, 23, 24, 25, 26, 27, 28, 29, 40, 43, 51, 55, 62, 64, 65, 66, 70, 75, 76, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 125, 127, 128, 129, 130, 148, 151, 152, 157, 158, 160, 161, 163, 164, 166, 168, 169], "copi": [7, 9, 14, 15, 19, 23, 24, 25, 26, 27, 28, 29, 55, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 134, 168], "instead": [7, 9, 19, 32, 64, 74, 76, 81, 93, 97, 107, 119, 124, 130, 152, 157, 160, 165, 168, 169, 170], "place": [7, 11, 60, 76, 79, 80, 136, 167, 169, 170], "data": [7, 40, 72, 128, 129, 139, 151, 161, 168], "particular": [7, 163], "miss": [7, 9, 76], "lot": [7, 9, 168, 170], "call": [7, 8, 9, 15, 23, 24, 25, 26, 27, 28, 29, 32, 64, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 145, 158, 168, 169, 170], "offer": [7, 8, 9, 10, 138, 165, 166, 170], "special": [7, 8, 84, 168], "purpos": [7, 70], "imageclip": [7, 69, 71, 74, 76, 102, 103, 130, 148, 169, 170], "textclip": [7, 8, 9, 69, 148, 163, 170], "overlai": [7, 8, 93], "ll": [7, 8], "defin": [7, 15, 74, 76, 80, 93, 95, 138, 139, 152, 157, 158, 165, 166, 168, 169], "content": [7, 14, 19, 76, 123, 124, 125, 148, 163], "size": [7, 18, 19, 32, 34, 38, 40, 71, 74, 76, 79, 80, 81, 100, 102, 103, 110, 112, 113, 135, 138, 142, 148, 156, 157, 158, 165, 168, 170], "color": [7, 8, 70, 71, 74, 76, 79, 80, 81, 84, 90, 91, 96, 100, 101, 102, 103, 106, 108, 110, 112, 113, 148, 156, 157, 158, 163, 165, 166, 168, 170], "made": [7, 9, 18, 19, 20, 38, 70, 76, 79, 81, 119, 121, 168], "them": [7, 8, 9, 15, 23, 24, 25, 26, 27, 28, 29, 40, 63, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 151, 165, 166, 168, 169], "creation": [7, 8, 31, 120, 151, 168], "put": [7, 8, 19, 20, 165, 168, 170], "ttf": [7, 8, 168, 170], "intro_text": 7, "blender": 7, "foundat": 7, "npeach": 7, "font_siz": [7, 8, 74, 148, 163, 168, 170], "50": [7, 14, 76, 86, 158, 168, 169], "fff": [7, 170], "text_align": [7, 74, 170], "center": [7, 8, 19, 40, 74, 76, 81, 86, 110, 124, 125, 156, 165, 170], "bird_text": 7, "unlucki": 7, "bird": 7, "bunny_text": 7, "A": [7, 10, 14, 18, 19, 23, 34, 35, 37, 38, 40, 64, 72, 73, 74, 76, 79, 81, 93, 109, 110, 112, 113, 119, 121, 138, 142, 143, 148, 151, 152, 157, 163, 168, 170], "slightli": [7, 129, 156], "overweight": 7, "rodents_text": 7, "three": [7, 9, 93, 169], "pest": 7, "revenge_text": 7, "reveng": 7, "come": [7, 97, 112, 166, 168, 169], "made_with_text": 7, "logo_clip": 7, "logo_bbb": 7, "png": [7, 73, 76, 119, 130, 138, 168, 170], "width": [7, 19, 34, 71, 74, 76, 79, 80, 81, 86, 109, 111, 124, 125, 135, 138, 142, 148, 151, 157, 158, 165, 168, 169, 170], "400": [7, 86, 170], "moviepy_clip": 7, "logo_moviepi": 7, "300": [7, 86, 112, 113], "rather": [7, 19, 38], "complic": [7, 8, 14, 19, 38, 138], "argument": [7, 9, 19, 23, 32, 61, 71, 74, 76, 83, 124, 125, 128, 151, 160, 163, 166, 168, 169, 170], "accept": [7, 34, 62, 76, 86, 138, 148, 
168, 169], "ha": [7, 8, 9, 14, 40, 45, 57, 58, 64, 66, 73, 76, 79, 81, 100, 102, 103, 110, 112, 113, 127, 128, 130, 143, 151, 152, 165, 168, 170], "been": [7, 8, 9, 76, 102, 103, 127, 128, 143, 165], "shorten": 7, "same": [7, 8, 9, 15, 23, 24, 25, 26, 27, 28, 29, 50, 64, 70, 73, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 138, 165, 166, 168, 169], "true": [7, 14, 19, 66, 71, 72, 73, 74, 75, 76, 79, 81, 84, 109, 110, 119, 121, 124, 125, 127, 128, 129, 130, 136, 138, 139, 161, 168], "crop": [7, 9, 76, 89, 163, 169], "rotat": [7, 9, 76, 169], "feel": 7, "free": [7, 76], "experi": 7, "differ": [7, 8, 11, 15, 20, 23, 24, 25, 26, 27, 28, 29, 61, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 128, 147, 151, 166, 167, 168, 169], "achiev": [7, 27], "desir": [7, 40, 76, 107], "singl": [7, 40, 165, 168], "composit": [7, 8, 14, 17, 20, 76, 85, 112, 113, 167, 168, 170], "obvious": [7, 81, 165, 169], "endpoint": 7, "durat": [7, 14, 19, 20, 23, 24, 25, 26, 57, 70, 71, 73, 74, 75, 76, 79, 81, 83, 87, 88, 90, 91, 92, 97, 99, 102, 103, 112, 113, 115, 119, 124, 125, 128, 129, 151, 165, 166, 168, 169, 170], "except": [7, 76, 168, 169, 170], "throw": [7, 127], "error": [7, 19, 57, 58, 76, 124, 125, 128, 129, 138], "infinit": [7, 14, 19, 76, 97, 168, 169, 170], "stop": [7, 14, 169], "indic": [7, 9, 14, 19, 34, 64, 71, 76, 86, 93, 107, 124, 125, 128, 129, 157, 165, 168, 169], "must": [7, 9, 14, 15, 23, 24, 25, 26, 27, 28, 29, 32, 71, 74, 76, 79, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 129, 148, 152, 157, 163, 166, 168, 169, 170], "wa": [7, 9, 15, 19, 23, 24, 25, 26, 27, 28, 29, 70, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 124, 125, 168, 169], "sai": [7, 8, 14, 64, 148], "tell": [7, 51, 108], "with_dur": [7, 8, 14, 165, 168, 170], "with_start": [7, 14, 165], "intro": 7, "with_end": [7, 14, 165, 169], "synchro": 7, "0": [7, 8, 14, 23, 28, 29, 40, 43, 71, 74, 75, 76, 79, 81, 83, 90, 91, 92, 93, 98, 100, 101, 102, 103, 108, 109, 111, 112, 113, 127, 145, 148, 151, 152, 153, 156, 157, 158, 160, 161, 165, 166, 168, 169, 170], "endtim": 7, "calcul": [7, 153], "reciproc": 7, "practic": [7, 75, 165, 168, 169, 170], "idea": [7, 10], "act": [7, 76, 168], "assembl": 7, "compositevideoclip": [7, 8, 76, 87, 88, 112, 113, 143, 163, 165, 168, 170], "arrai": [7, 14, 18, 19, 23, 32, 40, 73, 76, 80, 119, 139, 154, 156, 157, 158, 165, 168, 169], "input": [7, 34, 37, 110, 129, 133, 136], "possibl": [7, 14, 40, 76, 127, 138, 151, 163, 169, 170], "biggest": [7, 165], "manual": [7, 9, 170], "quick_compo": 7, "pretti": [7, 170], "satisfi": [7, 170], "left": [7, 28, 40, 74, 76, 86, 100, 110, 112, 113, 158, 160, 165], "smaller": [7, 9, 76, 81, 129, 138, 151, 165], "than": [7, 8, 9, 19, 23, 40, 76, 83, 119, 121, 124, 125, 128, 129, 151, 152, 168, 169, 170], "simpli": [7, 9, 73, 76, 81, 143, 168, 169, 170], "with_posit": [7, 8, 76, 165, 170], "almost": [7, 8, 9], "everi": [7, 12, 76], "pixel": [7, 14, 34, 71, 74, 76, 80, 86, 93, 96, 100, 101, 102, 103, 109, 121, 130, 138, 139, 142, 143, 148, 156, 157, 158, 161, 166, 168, 169], "h": [7, 76, 111, 169], "200": [7, 86, 151, 158, 
168], "360": [7, 19, 76, 124, 125, 169], "anoth": [7, 8, 9, 21, 76, 79, 81, 90, 91, 135, 138, 155, 165, 168], "tupl": [7, 14, 74, 76, 93, 100, 101, 109, 110, 128, 135, 138, 142, 152, 156, 157, 158, 166, 168, 169], "horizont": [7, 74, 76, 80, 104, 111, 148, 158], "vertic": [7, 74, 105, 111, 158], "give": [7, 19, 38, 74, 76, 108, 124, 125], "bottom": [7, 14, 74, 76, 100, 112, 113, 166], "percentag": 7, "float": [7, 14, 19, 23, 24, 25, 26, 28, 29, 40, 43, 71, 74, 76, 83, 85, 86, 87, 88, 90, 91, 92, 93, 94, 95, 97, 98, 99, 100, 101, 106, 107, 108, 109, 110, 112, 113, 114, 124, 125, 133, 148, 150, 151, 152, 153, 156, 157, 158, 160, 165, 168, 169], "pass": [7, 14, 15, 19, 21, 76, 121, 124, 125, 138, 151, 165, 166, 168, 170], "rel": [7, 76, 167], "expect": [7, 46, 170], "raw": [7, 37, 40], "smoother": 7, "through": [7, 8, 9, 10, 14, 168, 170], "crucial": 7, "role": 7, "enhanc": [7, 8], "auditori": 7, "appeal": 7, "appli": [7, 8, 9, 14, 15, 22, 23, 24, 25, 26, 27, 28, 29, 49, 50, 55, 56, 73, 76, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 143, 165, 166, 167, 168, 170], "transform": [7, 14, 19, 29, 40, 73, 76, 83, 108, 157, 165, 166, 169], "better": [7, 8, 9, 41, 138, 151, 167, 170], "whether": [7, 14, 111], "smooth": 7, "alter": 7, "properti": [7, 15, 20, 23, 24, 25, 26, 27, 28, 29, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 166, 168, 169], "exist": [7, 8, 9, 10, 14, 73, 76, 116, 121, 136, 154, 165, 166, 170], "bring": 7, "creativ": 7, "life": [7, 9], "eas": 7, "under": [7, 10, 151, 169], "namespac": [7, 9, 61], "vfx": [7, 14, 24, 76, 102, 103, 109, 112, 113, 151, 165, 169], "afx": [7, 14, 23, 25, 26, 27, 28, 29, 169], "both": [7, 29, 138, 165, 168], "actual": [7, 19, 76, 79, 92, 124, 125, 157, 165, 170], "embed": [7, 19, 76, 124, 125], "with_effect": [7, 14, 23, 24, 25, 26, 27, 28, 29, 102, 103, 109, 112, 113, 151, 165, 166, 169], "fade": [7, 25, 81, 90, 91, 99], "cross": [7, 90, 91], "AND": 7, "natur": 7, "him": [7, 168], "noth": [7, 19, 73, 76, 124, 169], "fanci": [7, 8], "crossfadein": [7, 90, 165], "crossfadeout": [7, 91], "ones": [7, 8, 76, 166], "fadein": [7, 165], "fadeout": 7, "audiofadein": 7, "audiofadeout": 7, "rambo": 7, "appar": [7, 166, 168], "show": [7, 71, 76, 83, 165, 166, 168], "motion": [7, 114], "p": 7, "direct": [7, 100, 111, 157, 158], "shortcut": 7, "with_multiply_spe": [7, 14], "with_multiply_volum": [7, 8, 14, 165, 169], "crope": [7, 76], "multiplyspe": [7, 14, 76, 169], "assign": [7, 166], "quick_comp": 7, "nicer": 7, "mostli": [7, 165], "howev": [7, 9, 76, 168], "fx": [7, 165, 166, 169], "crossfad": 7, "transpar": [7, 73, 74, 76, 79, 80, 81, 100, 101, 110, 130, 148, 168, 170], "opaqu": [7, 76, 81], "wonder": 7, "won": [7, 19, 76], "declar": [7, 168], "mask": [7, 14, 48, 50, 52, 71, 73, 75, 76, 79, 80, 81, 90, 91, 93, 96, 101, 102, 103, 104, 105, 107, 111, 115, 116, 119, 121, 130, 138, 142, 143, 155, 157, 165, 166, 169, 170], "rang": [7, 23, 168], "fulli": [7, 76, 112, 113, 168], "info": [7, 9, 14, 40, 66, 76, 128, 129, 138, 165, 170], "epic": 7, "sepia": 7, "box": 7, "full": [7, 167, 168, 169], "beyond": 7, "scope": [7, 9, 166], "image_transform": [7, 73, 76, 166, 169], "understand": [7, 8, 9, 10, 167, 168, 169], "frame": [7, 8, 14, 18, 19, 20, 32, 34, 37, 38, 40, 70, 72, 73, 75, 76, 81, 92, 95, 100, 114, 119, 121, 124, 127, 129, 132, 138, 142, 
143, 150, 151, 152, 153, 160, 161, 166, 168, 169], "ndarrai": [7, 40, 76, 102, 103, 139, 161, 169], "shape": [7, 76, 83, 157, 165, 169], "hxwx3": [7, 139, 168], "math": [7, 168, 169], "oper": [7, 8, 9, 168], "callback": [7, 168, 169], "current": [7, 9, 14, 15, 63, 66, 76, 83, 93, 97, 107, 115, 116, 119, 136, 151, 166, 168, 169], "advanc": [7, 8, 168], "usag": [7, 9, 14, 168], "involv": [7, 8], "matrix": [7, 80], "ignor": [7, 14, 143], "until": [7, 14, 29, 32, 76, 121, 157, 165, 168], "rememb": 7, "mathemat": 7, "could": [7, 8, 15, 23, 24, 25, 26, 27, 28, 29, 61, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 166, 170], "pillow": [7, 8, 9, 168, 170], "entri": [7, 168], "output": [7, 14, 19, 34, 76, 81, 110, 128, 133, 134, 135, 136, 138, 139, 142, 143, 168, 170], "def": [7, 64, 75, 166, 168, 169], "sepia_fitl": 7, "rule": [7, 168], "res_r": 7, "393": 7, "g": [7, 76, 111, 116, 138, 152, 157, 168], "769": 7, "b": [7, 76, 157, 168], "189": 7, "res_g": 7, "349": 7, "686": 7, "168": 7, "res_b": 7, "272": 7, "534": 7, "131": 7, "effici": [7, 8], "multipli": [7, 14, 23, 29, 76, 106, 107, 109], "sepia_matrix": 7, "convert": [7, 8, 32, 52, 53, 54, 62, 76, 164, 168], "float32": 7, "astyp": 7, "n": [7, 14, 97, 127, 151, 168], "k": 7, "transpos": 7, "sepia_imag": 7, "dot": 7, "255": [7, 96, 102, 103, 112, 113, 157, 158, 168], "back": [7, 19], "uint8": [7, 14, 157, 168], "integ": [7, 168], "save": [7, 8, 76, 121, 130, 132, 151, 161, 167, 168], "again": 7, "tweak": 7, "congratul": 7, "successfulli": 7, "trim": 7, "littl": 7, "dig": 7, "deeper": 7, "truli": 7, "captiv": 7, "why": 8, "process": [8, 10, 14, 19, 32, 41, 63, 76, 80, 121, 124, 125, 127, 169], "compos": [8, 20, 76, 81, 165, 167, 168, 170], "gif": [8, 19, 76, 83, 97, 116, 124, 125, 144, 145, 151, 168], "web": [8, 76], "server": 8, "django": 8, "flask": 8, "tediou": 8, "titl": [8, 170], "insert": 8, "cut": [8, 14, 168, 169], "scene": [8, 76, 151, 152], "credit": [8, 111], "subtitl": 8, "effect": [8, 14, 22, 23, 24, 25, 26, 27, 28, 29, 51, 76, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 151, 167, 168], "editor": [8, 148], "matplotlib": 8, "mayavi": 8, "gizeh": 8, "scikit": [8, 9], "NOT": 8, "best": [8, 151, 168, 170], "solut": [8, 170], "analysi": 8, "face": [8, 168], "stuff": [8, 170], "associ": [8, 148], "opencv": [8, 9], "simplecv": 8, "seri": [8, 119, 152, 168], "avconv": 8, "mencod": 8, "faster": [8, 14, 76, 121, 138, 169], "memori": [8, 14, 32, 119], "develop": [8, 9, 10], "goal": [8, 19, 76], "mind": [8, 23, 165, 170], "intuit": [8, 9], "line": [8, 14, 74, 108, 128, 148, 158, 168, 170], "newcom": [8, 169], "flexibl": [8, 165], "total": [8, 92, 97, 148, 166, 168], "control": [8, 28], "portabl": 8, "yet": 8, "stream": [8, 40, 121, 128], "webcam": 8, "live": [8, 168], "distant": 8, "design": [8, 70, 128, 161], "success": [8, 72, 81], "stabil": [8, 136], "sourc": [8, 10, 14, 15, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 29, 32, 34, 35, 37, 38, 40, 43, 45, 46, 53, 54, 56, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 119, 121, 123, 124, 125, 127, 128, 129, 130, 132, 133, 134, 135, 136, 138, 139, 140, 142, 143, 145, 148, 150, 151, 152, 153, 155, 156, 157, 158, 
160, 161, 163, 164, 165, 169], "100": [8, 14, 76, 86, 157, 161, 168], "futur": [8, 9], "typic": 8, "load": [8, 9, 32, 119, 151, 161, 165, 167, 169, 170], "modifi": [8, 9, 14, 15, 23, 24, 25, 26, 27, 28, 29, 51, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 165, 166, 167, 168], "togeth": [8, 20, 79, 165, 168], "final": [8, 34, 76, 79, 81, 91, 93, 111, 157, 163, 165, 168, 169, 170], "volum": [8, 14, 19, 23, 27, 28, 29, 169], "ten": 8, "extract": [8, 14, 132, 133, 151, 168], "long_exampl": [8, 170], "example2": [8, 165, 170], "reduc": [8, 9, 76], "80": [8, 153, 170], "hi": [8, 9, 165, 169, 170], "8": [8, 23, 157, 163, 168], "text": [8, 74, 148, 161, 163, 164, 168, 170], "txt_clip": 8, "white": [8, 84, 96, 148, 157, 163, 168], "appear": [8, 79, 81, 87, 90, 91, 165], "screen": [8, 93, 112, 113, 156], "export": [8, 168], "intern": [8, 15, 32, 121, 128, 161, 165, 169], "represent": 8, "media": [8, 19, 24, 25, 76, 124, 151, 153], "fast": [8, 19, 32, 38, 76, 128, 138], "numer": [8, 43], "unit": [8, 76, 110], "lambda": [8, 14, 19, 23, 76, 109, 151, 163, 168, 169], "mix": [8, 76, 165], "Of": [8, 169], "handi": [8, 166], "facilit": 8, "undergon": 9, "larg": [9, 110, 152, 165, 170], "consist": 9, "order": [9, 19, 21, 119, 129, 165, 168, 170], "break": [9, 168], "therefor": 9, "high": [9, 14, 121, 168], "likelihood": 9, "sinc": [9, 73], "reach": [9, 25, 157], "year": 9, "ago": 9, "focus": 9, "3": [9, 14, 19, 29, 76, 151, 153, 157, 160, 163, 165, 168, 169, 170], "7": [9, 76, 157], "advantag": [9, 170], "latest": [9, 79], "languag": [9, 10], "improv": [9, 10, 170], "qualiti": [9, 19, 38, 76, 138, 170], "secur": 9, "upgrad": 9, "continu": 9, "advis": 9, "whenev": [9, 40, 127, 169], "sort": [9, 168, 169], "hand": [9, 169], "packag": 9, "charg": 9, "magic": 9, "initi": [9, 40, 90, 127, 166], "cost": 9, "complex": [9, 76, 166, 168], "pygam": [9, 19], "__all__": 9, "useful": [9, 66, 74, 168, 170], "One": [9, 19, 76, 92, 110, 112, 113, 124, 125, 163, 168], "signific": 9, "set_": 9, "outplac": [9, 76], "untouch": [9, 169], "match": [9, 14, 76, 150, 151, 163], "equival": [9, 14, 15, 23, 24, 25, 26, 27, 28, 29, 73, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "logic": [9, 102, 103], "remain": [9, 40, 93], "global": [9, 61, 166], "implement": [9, 13, 17, 31, 63, 69, 118, 120, 122, 126, 137, 141, 166, 169], "heavili": 9, "structur": 9, "orient": [9, 151], "approach": 9, "previou": [9, 152], "onward": 9, "repres": [9, 14, 18, 32, 70, 73, 83, 109, 110, 119, 151, 157, 168], "organ": 9, "encapsul": 9, "reusabl": [9, 166], "comprehens": 9, "within": 9, "manag": [9, 121, 138, 168], "abstract": [9, 15, 166], "ever": 9, "migrat": 9, "meant": [9, 14, 76, 130], "had": 9, "previous": [9, 15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "favor": 9, "ad": [9, 63, 74, 76, 161, 169], "runtim": [9, 165, 169], "reli": 9, "gracefulli": 9, "dropback": 9, "event": [9, 19, 38, 143], "eventu": [9, 168], "hard": [9, 166, 168], "fragment": 9, "tri": [9, 40, 127], "simpler": 9, "dependici": 9, "altogeth": 9, "imagemagick": 9, "scipi": 9, "sadli": 9, "replac": [9, 14, 64, 74, 76, 96, 114, 166, 169], "io": [9, 163], "slider": 9, "happen": [9, 169], "path": [9, 19, 32, 54, 73, 74, 76, 121, 
132, 133, 134, 135, 139, 148, 151, 161, 163, 168], "instanci": [9, 169], "probabl": [9, 66, 143, 166, 168, 170], "evolut": 9, "releas": [9, 10, 14, 79, 168], "becam": 9, "ambit": 9, "sometim": [9, 165, 168, 169, 170], "regard": 9, "manpow": 9, "inconsist": 9, "functionn": 9, "choic": [9, 19, 76, 124, 168, 170], "reflect": 9, "state": [9, 75, 128, 168], "factor": [9, 14, 23, 28, 29, 106, 107, 109], "distribut": 9, "pipi": 9, "diverg": 9, "confus": 9, "chao": 9, "effort": 9, "decid": [9, 165], "major": 9, "interest": [9, 119], "went": 9, "1874": 9, "1089": 9, "2012": 9, "nov": 10, "22": 10, "2024": 10, "q": 10, "mit": 10, "licens": 10, "friendli": [10, 76], "instruct": 10, "kei": 10, "background": [10, 74, 76, 79, 81, 148, 156, 165, 170], "describ": 10, "assum": [10, 14], "saw": 10, "typo": 10, "written": [10, 19, 34, 37, 76, 124, 125, 151, 168], "licenc": 10, "tweet": 10, "definit": 11, "moviepi": [11, 165, 166, 168, 170], "beginn": 11, "submodul": 12, "central": 13, "two": [13, 14, 19, 74, 76, 79, 81, 102, 103, 133, 151, 158, 165, 166, 168, 169], "subclass": [13, 17, 69], "class": [13, 14, 15, 17, 18, 19, 20, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 64, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 126, 127, 128, 137, 138, 141, 142, 147, 148, 149, 150, 151, 159, 160, 161, 162, 163, 165, 166, 168, 169], "none": [14, 19, 26, 29, 34, 35, 38, 48, 67, 70, 71, 73, 74, 75, 76, 79, 80, 81, 83, 84, 86, 90, 91, 92, 93, 95, 97, 100, 107, 109, 110, 111, 119, 121, 124, 125, 127, 130, 133, 136, 138, 139, 140, 143, 145, 148, 151, 152, 153, 155, 157, 158, 160, 163, 164], "close": [14, 32, 34, 37, 40, 61, 76, 79, 121, 127, 138, 142], "chain": [14, 81, 169], "invoc": [14, 46], "get_fram": [14, 40, 76, 102, 103, 127, 166, 168, 169], "rgb": [14, 52, 71, 73, 74, 76, 84, 157, 166, 168], "pictur": [14, 73, 74, 76, 84, 93, 119, 126, 157, 165, 168, 169, 170], "mono": [14, 18, 19, 168], "stereo": [14, 18, 19, 23, 28, 168], "str": [14, 19, 61, 65, 76, 84, 104, 105, 110, 112, 113, 124, 125, 128, 132, 133, 134, 135, 136, 138, 139, 142, 143, 151, 152, 157, 161], "moment": [14, 76, 133, 164, 169], "whose": [14, 19, 27, 72, 75, 76, 80, 124, 148, 151, 157, 168], "is_plai": 14, "express": [14, 76, 163], "15": [14, 19, 62, 76, 124, 125, 168], "35": [14, 76], "min": [14, 62, 76, 168], "sec": [14, 62, 76, 168, 169, 170], "hour": [14, 76, 169], "01": [14, 43, 62, 76, 168, 170], "05": [14, 76], "fals": [14, 19, 32, 35, 40, 70, 71, 72, 73, 75, 76, 79, 81, 110, 119, 121, 127, 128, 129, 138, 139, 140, 161, 168, 170], "els": [14, 81], "vector": [14, 157, 158], "b_1": 14, "b_2": 14, "b_3": 14, "b_i": 14, "tti": 14, "iter_fram": 14, "with_tim": 14, "logger": [14, 19, 35, 67, 76, 124, 125, 132, 133, 134, 135, 136, 140, 145, 151, 152], "dtype": [14, 168], "iter": [14, 19, 61, 76, 128, 151, 168], "hxwxn": 14, "1": [14, 19, 23, 28, 37, 38, 40, 43, 62, 71, 74, 75, 76, 83, 84, 90, 91, 92, 96, 100, 101, 102, 103, 108, 109, 112, 113, 124, 125, 127, 148, 151, 153, 156, 157, 158, 160, 165, 168, 169, 170], "treatment": 14, "field": [14, 128, 129], "scienc": [14, 75, 168], "int": [14, 19, 23, 26, 40, 76, 86, 97, 100, 109, 114, 124, 132, 138, 142, 143, 151, 152, 153, 158, 161, 166, 168, 169], "attribut": [14, 15, 19, 20, 23, 24, 25, 26, 27, 28, 29, 40, 75, 76, 79, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 
103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 121, 152, 165, 168, 170], "bool": [14, 19, 66, 76, 84, 109, 110, 124, 125, 136, 138, 139, 151, 161], "ff": 14, "yield": [14, 100, 138], "otherwis": [14, 19, 138, 151, 168], "bar": [14, 19, 35, 67, 76, 124, 125, 132, 133, 134, 135, 136, 140, 145, 151, 152, 166], "proglog": [14, 19, 67, 76, 151, 152], "cast": [14, 128], "print": [14, 40, 64, 67, 151, 168, 170], "maximum": [14, 19, 27, 43, 103, 151], "red": [14, 83, 102, 103, 157, 158, 168], "myclip": [14, 76, 109, 168, 169, 170], "myvideo": [14, 26, 27, 76, 163], "max": 14, "time_transform": [14, 73, 166, 169], "time_func": [14, 73], "apply_to": [14, 73, 76, 104, 105, 111, 166, 169], "keep_dur": [14, 73], "timelin": [14, 73, 169], "being": [14, 15, 23, 24, 25, 26, 27, 28, 29, 34, 37, 40, 61, 72, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 158, 170], "new_t": 14, "filter": [14, 61, 73, 95, 110, 151, 166], "twice": [14, 76], "new_clip": [14, 76, 110], "backward": [14, 32, 115, 116], "func": [14, 48, 49, 50, 51, 52, 55, 57, 58, 59, 64, 73, 151, 166], "signatur": 14, "gf": 14, "scroll": [14, 148, 169], "skip": [14, 40, 128, 129], "cutout": 14, "change_end": 14, "preset": [14, 76, 138, 140, 170], "accordingli": [14, 40, 76], "bilinear": [14, 110, 121, 157], "volumex": 14, "5": [14, 19, 28, 29, 62, 74, 76, 109, 148, 151, 156, 157, 160, 165, 168, 169, 170], "mirrorx": [14, 165], "with_fp": [14, 112, 113], "change_dur": 14, "iterfram": 14, "speed": [14, 18, 76, 83, 107, 111, 168, 169], "conserv": [14, 76, 86, 109], "halv": [14, 28, 29], "mode": 14, "doubl": [14, 29, 169], "with_is_mask": 14, "is_mask": [14, 70, 71, 72, 73, 75, 76, 79, 81, 119, 121, 168], "with_make_fram": [14, 76], "make_fram": [14, 19, 20, 23, 75, 76, 79, 168], "arbitrari": [14, 40, 86, 127, 158], "creator": 14, "with_memo": 14, "memoiz": 14, "final_dur": [14, 107], "multiplyvolum": [14, 169], "slice": 14, "sequenc": [14, 76, 119, 151, 163, 168, 170], "t_start": [14, 169], "t_end": [14, 169], "chosen": [14, 151], "begin": [14, 29, 73, 90], "produc": [14, 19, 76, 81, 83, 133, 148, 153, 168], "neg": [14, 81, 169], "reset": [14, 168], "potenti": 14, "extend": 15, "target": [15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "shallow": [15, 23, 24, 25, 26, 27, 28, 29, 76, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "unset": [15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "behavior": [15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 169, 170], "himself": [15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "idempot": [15, 23, 24, 25, 26, 27, 28, 29, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "audiofileclip": [17, 19, 23, 26, 28, 29, 165, 170], 
"audioarrayclip": 17, "compositeaudioclip": [17, 19, 165], "nx1": [18, 168], "nx2": [18, 168], "suppos": [18, 168], "usabl": 19, "form": [19, 164, 165], "f_t": 19, "f1_t": 19, "f2_t": 19, "bound": [19, 43, 150], "trespass": 19, "convers": 19, "impact": [19, 76, 148], "variat": [19, 169], "nchannel": [19, 34, 37, 40], "channel": [19, 28, 37, 40, 84, 139], "sine": 19, "wave": 19, "frequenc": [19, 168], "440": [19, 23, 168], "sin": [19, 23, 168, 169], "pi": [19, 23, 168], "44100": [19, 23, 32, 38, 40, 76, 121, 132, 168], "880": [19, 23], "audiopreview": 19, "buffers": [19, 32, 35, 38, 40], "2000": [19, 38, 76], "nbyte": [19, 32, 34, 35, 37, 38, 40], "audio_flag": [19, 38, 143], "video_flag": [19, 38, 143], "rate": [19, 38, 76, 132, 138, 168], "caus": [19, 38, 121, 138], "jump": [19, 38, 152], "11025": [19, 38], "5000": [19, 38], "toler": [19, 38], "bunch": [19, 38], "chunk": [19, 38, 40], "vari": [19, 38, 76, 157, 168], "shouldn": [19, 38, 168], "byte": [19, 37, 38, 40, 76, 127], "encod": [19, 37, 38, 76, 138, 163, 164, 170], "8bit": [19, 37, 38], "16bit": [19, 37, 38], "4": [19, 37, 38, 40, 62, 74, 76, 108, 148, 151, 157, 160, 165, 168, 170], "32bit": [19, 37, 38], "thread": [19, 38, 76, 138, 140, 143, 170], "synchron": [19, 38], "display_in_notebook": [19, 76, 170], "filetyp": [19, 76, 124, 125], "maxdur": [19, 76, 124, 125], "60": [19, 76, 86, 112, 113, 124, 125, 148, 165], "rd_kwarg": [19, 76, 124, 125], "html_kwarg": [19, 76, 124, 125], "displai": [19, 66, 73, 76, 79, 80, 85, 124, 125, 143, 157, 168, 170], "jupyt": [19, 76, 122, 124], "notebook": [19, 76, 122, 124], "remark": [19, 76, 124], "doesn": [19, 76, 80, 121, 124], "mayb": [19, 76, 124, 168], "filenam": [19, 32, 34, 35, 40, 74, 76, 121, 124, 125, 127, 128, 129, 130, 136, 138, 139, 140, 145, 151, 161, 163, 164, 168, 170], "wrong": [19, 76, 124], "physic": [19, 76, 124, 170], "determin": [19, 66, 74, 76, 79, 83, 124, 125, 152], "extens": [19, 32, 65, 76, 121, 124, 125, 138, 170], "rais": [19, 57, 58, 76, 83, 124, 125, 128, 129, 168], "spoil": [19, 76, 124, 125], "cach": [19, 76, 124, 125, 170], "ram": [19, 76, 119, 124, 125, 170], "enabl": [19, 28, 76, 121, 124, 128, 129], "unknown": [19, 76, 124], "dict": [19, 61, 76, 124, 125], "bitrat": [19, 32, 34, 35, 76, 124, 125, 132, 138, 140, 170], "50k": [19, 76, 124, 125], "disabl": [19, 76, 124, 125, 128, 129], "wrap": [19, 35, 38, 63, 74, 76, 124, 125, 130], "div": [19, 76, 124, 125], "align": [19, 74, 76, 124, 125, 168], "middl": [19, 76, 124, 125], "html": [19, 76, 121, 124, 125, 163], "kwarg": [19, 55, 59, 63, 76, 124, 151], "260": [19, 76, 124, 125], "loop": [19, 26, 76, 99, 116, 124, 125, 128, 145, 151, 153, 170], "autoplai": [19, 76, 124, 125, 170], "later": [19, 76, 124, 125, 166, 168], "write_gif": [19, 76, 124, 125, 151, 168, 170], "save_fram": [19, 76, 124, 125, 170], "first_fram": [19, 76, 124, 125], "jpeg": [19, 73, 76, 124, 125, 130, 168], "iter_chunk": 19, "chunksiz": [19, 40], "chunk_dur": 19, "quantiz": 19, "whole": [19, 32, 112, 113, 128, 129, 158, 169], "max_volum": 19, "50000": 19, "level": [19, 23, 24, 25, 76, 127, 138], "to_soundarrai": 19, "tt": [19, 40, 160, 161], "wav": [19, 23, 32, 34, 40, 76, 165, 168, 170], "write_audiofil": [19, 40, 168, 170], "ffmpeg_param": [19, 34, 35, 76, 138, 140, 170], "write_logfil": [19, 35, 76, 140], "self": [19, 64, 75, 127, 168], "sampl": [19, 168], "32": [19, 76], "choos": [19, 76, 93, 119, 165], "pcm_s16le": [19, 76], "pcm_s32le": [19, 76], "500k": [19, 76], "3000k": [19, 76], "Will": [19, 59, 62, 76, 85, 119], "mainli": [19, 
70, 76, 151], "necessarili": [19, 76], "term": [19, 76, 137, 138, 141], "option1": [19, 76], "value1": [19, 76], "option2": [19, 76], "value2": [19, 76], "logfil": [19, 34, 138, 139], "log": [19, 76, 138, 139], "sever": [20, 76, 81, 137, 141, 165], "concaten": [21, 81], "offset": [23, 151, 157], "n_repeat": 23, "decai": 23, "repeat": [23, 76, 168], "certain": [23, 168], "constant": [23, 76, 168, 169], "linear": [23, 83, 157, 160, 165], "space": [23, 74, 80, 114], "gap": [23, 148], "repetit": 23, "itself": 23, "increas": [23, 75, 106, 168, 170], "decreas": [23, 106, 170], "constantli": 23, "mute": [23, 24, 28], "greater": [23, 76, 151], "myaudio": 23, "decay": 23, "11": 23, "arriv": [24, 112], "chaplin": [24, 25, 76, 151, 153], "zero": [25, 40, 158, 165, 168], "n_loop": 26, "music": [26, 28, 29, 148], "ogg": [26, 28, 29, 76], "with_audio": [26, 76], "0db": 27, "audio_r": 28, "audio_h": 28, "doubled_audio_clip": 29, "half_audio_clip": 29, "silenc": 29, "third": [29, 44, 168], "silenced_clip": 29, "audiofil": [30, 76, 134, 138, 140], "decode_fil": [32, 40, 121, 127, 128, 129], "200000": [32, 121], "forward": [32, 116], "soundfil": 32, "lifetim": 32, "subprocess": [32, 34, 37, 40, 63, 67, 121, 138, 142, 168], "lock": [32, 121, 168], "construct": [32, 121, 168], "afterward": [32, 121], "subresourc": [32, 121], "clean": [32, 121, 168], "snd": 32, "song": 32, "fps_input": [34, 37], "libfdk_aac": [34, 76], "input_video": 34, "height": [34, 71, 74, 76, 79, 80, 81, 86, 109, 111, 135, 138, 142, 157, 166, 168, 169], "writer": [34, 37, 138, 142], "aliv": [34, 37, 40, 138, 142], "write_fram": [34, 37, 138], "frames_arrai": [34, 37], "chunck": [34, 37], "writ": 34, "libvorbi": [35, 76], "print_info": [40, 127, 129], "buffer": 40, "bigger": [40, 76, 165], "debug": 40, "decod": [40, 127, 128], "signal": 40, "receiv": [40, 166, 169], "buffer_around": 40, "frame_numb": 40, "retriev": [40, 61, 128, 129, 151], "correspond": [40, 65, 75, 76, 151], "timestamp": 40, "pipe": [40, 127, 138], "read_chunk": 40, "proc": 40, "stdout": 40, "row": [40, 76, 80, 168], "column": [40, 76, 80], "pad": [40, 81, 112, 113], "length": 40, "po": [40, 76, 79, 127, 155], "seek": 40, "coder": [40, 127], "painfulli": [40, 127], "fectch": 40, "adjac": [40, 127], "skip_chunk": 40, "discard": 40, "min_tim": 43, "max_tim": 43, "time_resolut": 43, "minimum": [43, 102, 151], "precis": [43, 167], "parti": 44, "cmd": [46, 67], "verifi": [46, 166, 170], "unmodifi": 51, "varnam": [53, 54, 56], "arg": [55, 64], "fun": [56, 76], "launch": 56, "misc": 60, "strategi": [61, 128], "dictionari": [61, 63, 128, 129], "local": 61, "valid": [62, 70, 170], "21": 62, "81": 62, "hr": 62, "3662": 62, "33": 62, "045": 62, "3693": 62, "coma": 62, "99": 62, "popen_param": 63, "popen": 63, "unexpect": 63, "behaviour": 63, "platform": 63, "creationflag": 63, "0x08000000": 63, "extra": [63, 74], "unwant": 63, "child": 63, "old_nam": 64, "deprecated_func": 64, "badli": 64, "to_fil": [64, 161], "write_fil": 64, "blablabla": 64, "host": 66, "system": [66, 168], "graphic": [66, 168], "bsd": 66, "x11": 66, "wayland": 66, "suno": 66, "aix": 66, "cygwin": 66, "execut": [67, 128, 148, 153], "imagesequenceclip": [69, 76], "bitmapclip": 69, "static": [69, 151, 161], "colorclip": [69, 102, 103, 112, 113], "bitmap_fram": 70, "color_dict": 70, "bitmap": 70, "to_bitmap": 70, "data_to_fram": [72, 168], "has_constant_s": [72, 76], "dataset": [72, 168], "d": [72, 101, 114, 157, 168, 170], "img": [73, 168], "fromalpha": 73, "non": [73, 76, 81, 93, 101, 165], "myhous": 73, 
"somearrai": 73, "tiff": 73, "alpha": [73, 76, 119, 139, 168], "layer": [73, 76, 79, 119, 130, 168], "image_func": [73, 76], "affect": [73, 169], "margin": [74, 165], "bg_color": [74, 76, 79, 80, 81, 110, 148, 156, 168], "stroke_color": [74, 148], "stroke_width": [74, 148], "label": [74, 168], "horizontal_align": 74, "vertical_align": 74, "interlin": [74, 168], "autogener": 74, "opentyp": [74, 168], "caption": [74, 163, 168], "mandatori": 74, "arround": 74, "symmetr": [74, 157], "four": 74, "asymmetr": 74, "especi": 74, "room": 74, "rgba": [74, 121, 130, 138, 139, 142, 143], "hexadecim": 74, "stroke": [74, 148], "contour": [74, 148, 165], "autos": 74, "fit": [74, 168], "exactli": [74, 163], "drawn": 74, "automag": 74, "similar": [74, 168, 170], "css": 74, "bloc": 74, "world": [75, 168], "particularli": [75, 168], "algorithm": [75, 121, 168], "clip_t": [75, 168], "to_fram": [75, 168], "wxh": 75, "got": [76, 168], "boolean": 76, "attach": [76, 168], "y": [76, 86, 111, 155, 156, 157, 158, 161, 165], "posit": [76, 83, 95, 111, 155, 156, 157, 158, 161, 169], "set_po": [76, 79], "relative_po": 76, "overlap": 76, "highest": [76, 79, 81], "aspect_ratio": 76, "aspect": [76, 121, 167, 169], "ratio": [76, 109, 121, 169], "blit_on": 76, "blit": [76, 79], "intens": [76, 95, 168, 170], "deepcopi": 76, "unpickl": 76, "x1": [76, 86, 93, 158, 168], "y1": [76, 86, 93, 158, 168], "x2": [76, 86, 93, 158, 168], "y2": [76, 86, 93, 158, 168], "x_center": [76, 86], "y_center": [76, 86], "rectangular": [76, 86], "subregion": [76, 86], "region": [76, 79, 80, 86, 93, 157, 158, 169], "coordin": [76, 86, 157, 165], "fill_arrai": 76, "pre_arrai": 76, "larger": [76, 152, 165], "excess": 76, "n_frame": [76, 114], "audio_fp": [76, 121, 129, 170], "22050": 76, "audio_buffers": [76, 121], "3000": [76, 132], "audio_nbyt": [76, 121], "slower": [76, 79, 80, 138, 170], "new_siz": [76, 109], "apply_to_mask": [76, 109], "angl": [76, 110], "deg": [76, 110], "resampl": [76, 110], "bicub": [76, 110, 121, 127], "expand": [76, 110], "translat": [76, 110], "degre": [76, 110], "radian": [76, 110], "anticlockwis": [76, 110], "90": [76, 110, 157, 161], "with_mask": [76, 119, 130, 138, 168, 170], "splash": 76, "to_imageclip": [76, 168], "to_rgb": [76, 168], "to_mask": [76, 168], "canal": [76, 169], "with_add_mask": [76, 110], "compar": 76, "constant_s": 76, "with_lay": 76, "mf": 76, "greyscal": [76, 168], "with_on_color": 76, "col_opac": 76, "overlaid": 76, "possibli": 76, "serv": [76, 163], "flatten": 76, "opac": [76, 100], "with_opac": 76, "semi": 76, "op": 76, "mark": [76, 133], "45": 76, "150": [76, 86], "40": 76, "with_sub_effect": 76, "6": [76, 109, 153, 157, 165, 168], "without_audio": [76, 169], "progress_bar": [76, 166], "multiply_spe": 76, "to_gif": 76, "write_images_sequ": [76, 170], "name_format": 76, "numerot": 76, "03d": 76, "index": [76, 170], "digit": [76, 170], "some_fold": 76, "04d": [76, 170], "names_list": 76, "medium": [76, 138, 140], "audio_codec": [76, 134], "audio_bitr": [76, 129], "audio_bufs": 76, "temp_audiofil": 76, "temp_audiofile_path": 76, "remove_temp": 76, "pixel_format": [76, 121, 127, 130, 138, 139, 140, 142, 143], "videofil": [76, 121, 134, 140], "avi": [76, 121, 138, 163], "ogv": [76, 121], "webm": 76, "compress": [76, 138, 170], "tunabl": 76, "higher": [76, 79, 153], "rawvideo": [76, 138], "huge": 76, "everyon": [76, 147], "libvpx": [76, 170], "tini": 76, "incorpor": [76, 138], "soundtrack": [76, 165, 168], "temporari": [76, 168], "libmp3lam": 76, "mp3": [76, 168], "m4a": 76, "unless": [76, 121, 130, 
138, 165, 168], "spend": 76, "optim": [76, 170], "ultrafast": [76, 138, 170], "superfast": [76, 138], "veryfast": [76, 138], "veryslow": [76, 138], "placebo": [76, 138], "hurri": 76, "multicor": 76, "120": 76, "my_new_video": 76, "use_bgclip": 79, "share": 79, "visibl": [79, 93, 112, 113, 165, 168], "unmask": 79, "unfil": [79, 80], "That": [79, 165], "playing_clip": 79, "rows_width": 80, "cols_height": 80, "side": [80, 112, 113, 165], "clip1": [80, 165], "clip2": [80, 121, 165], "clip3": [80, 165], "clip4": [80, 165], "clip5": 80, "clip6": 80, "layout": 80, "fulfil": 80, "transit": 81, "correct": [81, 94, 98, 129], "anyth": 81, "resolut": [81, 109], "consequ": 81, "widest": 81, "border": [81, 110, 156], "abov": [81, 86, 151, 152, 168], "consecut": [81, 152], "partli": 81, "cool": 81, "null": [81, 101, 128], "new_dur": 83, "abrupt": 83, "soon": 83, "acceler": 83, "deceler": 83, "slope": 83, "occur": [83, 157], "valueerror": 83, "sooness": 83, "graph": 83, "combin": [83, 86], "preserve_luminos": 84, "desatur": 84, "weight": 84, "rbg": 84, "crt_phosphor": 84, "sum": 84, "duration_on": 85, "duration_off": 85, "At": 85, "disappear": [85, 88, 90, 91], "rectangl": 86, "460": [86, 109, 168, 169], "275": 86, "30": [86, 168], "200px": 86, "wide": 86, "boundari": 86, "600": 86, "initial_color": 90, "final_color": 91, "freeze_dur": 92, "total_dur": [92, 168], "padding_end": 92, "momentarili": 92, "clip_fp": 92, "outside_region": 93, "freez": 93, "rest": 93, "word": [93, 106], "gamma": 94, "callabl": [95, 109], "fy": 95, "radiu": [95, 156, 157, 168], "blur": [95, 114, 156], "head": [95, 168], "invers": 96, "becom": [96, 168, 170], "purpl": 96, "indefinit": [97, 99], "lum": 98, "contrast": 98, "contrast_threshold": 98, "127": [98, 157, 168], "luminos": [98, 152], "overlap_dur": 99, "margin_s": 100, "draw": [100, 168], "around": [100, 168], "add_margin": 100, "threshold": [101, 152], "stiff": 101, "distanc": [101, 150, 151], "parametr": 101, "other_clip": [102, 103], "masked_clip": [102, 103], "yellow": 103, "flip": [104, 105, 165], "bright": 106, "satur": 108, "006": 108, "photo": 108, "flashi": 108, "np_imag": 108, "to_paint": 108, "scale": 109, "720": 109, "800": 109, "02": [109, 169], "swell": 109, "pic": 109, "pil": [109, 168], "72": 110, "rad": 110, "nearest": 110, "hold": 110, "omit": 110, "entir": [110, 170], "post": 110, "upper": [110, 150], "area": 110, "outsid": 110, "x_speed": 111, "y_speed": 111, "x_start": 111, "y_start": 111, "taken": [112, 113, 114], "slided_clip": [112, 113], "concatenate_videoclip": [112, 113, 151, 165], "goe": [113, 157, 170], "awai": [113, 127], "equal": [114, 151, 168], "practiv": 116, "load_imag": 119, "alphanumer": [119, 168], "small": [119, 168, 170], "has_mask": 121, "target_resolut": [121, 127], "resize_algorithm": 121, "fps_sourc": [121, 127, 128, 129], "myholidai": 121, "mymaskvideo": 121, "implicit": 121, "mpeg": 121, "mov": 121, "rare": 121, "desired_width": 121, "desired_height": 121, "popular": 121, "fast_bilinear": 121, "org": [121, 163], "scaler": 121, "collect": 121, "metadata": [121, 128, 129], "tbr": 121, "incorrect": 121, "rgb24": [121, 127, 130, 138, 139, 142, 143], "fail": [121, 170], "emb": [122, 170], "bufsiz": 127, "check_dur": [127, 128, 129], "resize_algo": 127, "delete_lastread": 127, "get_frame_numb": 127, "helper": 127, "read_fram": 127, "upon": 127, "lastread": 127, "skip_fram": 127, "finit": 128, "parser": 128, "pars": [128, 129], "accur": [128, 129, 153], "prefer": [128, 129, 158, 169], "stderr": 128, "parse_audio_stream_data": 128, 
"parse_data_by_stream_typ": 128, "stream_typ": 128, "parse_dur": 128, "parse_fp": 128, "parse_metadata_field_valu": 128, "pair": [128, 151], "parse_tbr": 128, "tb": 128, "parse_video_stream_data": 128, "video_metadata_type_cast": 128, "video_found": 129, "video_fp": 129, "video_n_fram": 129, "video_dur": 129, "video_bitr": 129, "video_metadata": 129, "audio_found": 129, "audio_metadata": 129, "incomplet": 129, "1222": 129, "bmp": 130, "ffmpeg_videoread": 130, "miscellan": 131, "bind": 131, "inputfil": [132, 133, 135, 136], "outputfil": [132, 133, 134, 135, 136], "inputfile_nam": 133, "sub": [133, 163], "_": [133, 168], "ext": 133, "video_codec": 134, "output_dir": 136, "overwrite_fil": 136, "shaki": 136, "append": 136, "_stabil": 136, "overwrit": 136, "recommend": 138, "seem": 138, "hierarchi": 138, "lossless": 138, "yuv420p": 138, "5000k": 138, "descriptor": 138, "img_arrai": [138, 142], "hxwx4": 139, "discov": [139, 167], "show_fram": 142, "anywher": 143, "believ": 143, "properli": 143, "wait": [143, 168], "shown": 143, "freeimag": 145, "difficult": [147, 154], "creditfil": 148, "blank": 148, "stori": 148, "marcel": 148, "durand": 148, "martin": 148, "didier": 148, "supervisor": 148, "jean": 148, "job": 148, "min_dist": 150, "max_dist": 150, "lst": 151, "instanti": [151, 160, 161], "from_clip": 151, "percent": 151, "condit": 151, "obtain": [151, 165, 168], "satistifi": 151, "framematch": 151, "new_match": 151, "time_span": 151, "distance_threshold": 151, "max_dur": 151, "alik": 151, "maxim": 151, "foo": 151, "matching_fram": 151, "somefil": 151, "dump": 151, "select_scen": 151, "match_threshold": 151, "min_time_span": 151, "nomatch_threshold": 151, "time_dist": 151, "select": 151, "reproduc": 151, "smoothest": 151, "pprint": 151, "ch_clip": 151, "mirror_and_clip": 151, "timemirror": 151, "0000": 151, "1600": 151, "8400": 151, "2800": 151, "7200": 151, "4000": 151, "6000": 151, "gifs_dir": 151, "gif_dir": 151, "time_mirror": 151, "mkdir": [151, 170], "00000100_00000400": 151, "00000115_00000384": 151, "00000128_00000372": 151, "00000140_00000360": 151, "luminosity_threshold": 152, "t1": [152, 163], "t2": [152, 163], "tf": 152, "avg": 152, "thr": 152, "averag": 152, "absolut": 152, "correl": 153, "timefram": 153, "round": 153, "videotool": 153, "deal": 154, "im1": 155, "im2": 155, "screensiz": 156, "canva": 156, "58578644": 156, "p1": [157, 158], "p2": [157, 158], "color_1": [157, 158], "color_2": [157, 158], "radial": 157, "gradient": 157, "gradual": [157, 158], "circular": 157, "fraction": 157, "9": [157, 165], "blurri": 157, "disc": 157, "n_color": 157, "25": [157, 158], "229": 157, "51": 157, "204": 157, "76": 157, "178": 157, "102": 157, "153": 157, "gradient_width": 158, "split": 158, "divid": 158, "sharp": 158, "situat": [158, 170], "antialias": 158, "along": [158, 168], "trajectori": 159, "curv": 159, "ttss": 160, "poorman": 160, "correspondi": 160, "uniqu": 160, "xx": 161, "yy": 161, "compound": 161, "from_fil": 161, "load_list": 161, "166": 161, "333": 161, "554": 161, "474": 161, "384": 161, "91": 161, "addx": 161, "addi": 161, "save_list": 161, "traj": 161, "txy": 161, "tm": 161, "millisecond": 161, "update_interpol": 161, "experiment": 162, "make_textclip": 163, "beforehand": 163, "srt": [163, 164], "standard": [163, 168], "georgia": 163, "regular": 163, "24": [163, 168, 170], "utf": 163, "in_subclip": 163, "match_expr": 163, "expr": 163, "against": 163, "write_srt": 163, "fed": 164, "subtitlesclip": 164, "carri": [165, 168], "easili": [165, 169, 170], "example3": 165, 
"arent": 165, "clip_arrai": 165, "clips_arrai": 165, "10px": 165, "horizontali": 165, "mirrori": 165, "verticali": 165, "our": [165, 166, 168, 170], "2x2": 165, "480": 165, "theme": [165, 168], "stack": 165, "hide": 165, "But": [165, 168], "et": 165, "compositionclip": 165, "frequent": [165, 166, 170], "abruptli": 165, "technic": [165, 168], "audiotrack": 165, "concatenate_audioclip": 165, "aclip1": 165, "aclip2": 165, "aclip3": 165, "concat": 165, "ontop": 165, "compo": 165, "timepoint": [166, 168, 169], "imagin": [166, 168], "inherint": 166, "__init__": [166, 168], "inherit": [166, 170], "anonym": 166, "decor": 166, "requires_dur": 166, "bar_width": 166, "boilerpl": 166, "dataclass": 166, "strongli": 166, "inspir": 166, "group": 167, "whish": 167, "overview": 167, "wont": 168, "subtyp": 168, "preciselli": 168, "summar": 168, "random": 168, "nois": 168, "200x100": 168, "randint": 168, "sinewav": 168, "make_frame_audio": 168, "example_img_dir": 168, "hello": 168, "380": 168, "unifi": 168, "thoroughli": 168, "applic": 168, "garbag": 168, "collector": 168, "earlier": 168, "earli": 168, "deriv": 168, "unsaf": 168, "thumb": 168, "statement": 168, "block": 168, "simul": 168, "implicitli": 168, "my_audiofil": 168, "immedi": [168, 169], "priori": [168, 170], "belong": 168, "pulsat": 168, "circl": 168, "imagedraw": 168, "128": 168, "puls": 168, "coef": 168, "ellips": 168, "framer": 168, "nor": [168, 169], "cute": 168, "transmit": 168, "image_0001": 168, "jpg": [168, 170], "image_0002": 168, "image_0003": 168, "myclip2": 168, "nb": 168, "result2": 168, "invok": [168, 170], "think": 168, "px": 168, "len": 168, "exterior": 168, "respons": 168, "drown": 168, "superior": 168, "coinflip": 168, "win": 168, "sorri": 168, "coinflipworld": 168, "increment": 168, "win_strik": 168, "strike": 168, "tail": 168, "retri": 168, "red_intens": 168, "victori": 168, "less": [168, 170], "lowest": 168, "param": 168, "least": 168, "stai": 168, "noise_imag": 168, "myclip1": 168, "myclip3": 168, "Or": 168, "autocalcul": 168, "txt_clip1": 168, "ff0000": 168, "ffffff": 168, "txt_clip2": 168, "txt": 168, "500": 168, "blue": [168, 169], "result1": 168, "overflow": 168, "fundament": 168, "compon": 168, "compris": 168, "makefram": 168, "rand": 168, "maskclip1": 168, "maskclip2": 168, "example_mask": 168, "maskclip3": 168, "clip_masked1": 168, "clip_masked2": 168, "clip_masked3": 168, "treat": [168, 169], "core": 168, "494": 168, "523": 168, "587": 168, "659": 168, "698": 168, "note_dur": 168, "sample_r": 168, "note_s": 168, "total_s": 168, "note_frequ": 168, "linspac": 168, "audio_arrai": 168, "item": 168, "audio_clip": 168, "integr": 169, "built": 169, "illustr": 169, "multiply_volum": 169, "loos": 169, "immediatli": 169, "clip_whisp": 169, "lost": 169, "recurr": 169, "neither": 169, "hungri": 169, "54": 169, "thu": 169, "without_": 169, "shorter": 169, "conveni": 169, "dynam": 169, "460px": 169, "half": 169, "multiplycolor": 169, "darken": 169, "your_filt": 169, "my_clip": [169, 170], "accel_x3": 169, "modified_clip1": 169, "modified_clip2": 169, "oscil": 169, "invert_green_blu": 169, "invert": 169, "distinguish": 169, "frame_region": 169, "relli": 169, "recogn": 169, "address": 169, "principl": 169, "peak": 170, "consum": 170, "capabl": 170, "emul": 170, "power": 170, "ffplay_audiopreview": 170, "contrari": 170, "my_video_clip": 170, "my_image_clip": 170, "my_audio_clip": 170, "snaphot": 170, "cell": 170, "onlin": 170, "drawback": 170, "restart": 170, "variou": 170, "obviou": 170, "cinema": 170, "result24fp": 170, "tu": 170, 
"vp9": 170, "optimis": 170, "minim": 170, "exot": 170, "happi": 170, "processor": 170, "necessarli": 170, "bottleneck": 170, "imposs": 170, "guess": 170, "Then": 170, "leftpad": 170}, "objects": {"": [[12, 0, 0, "-", "moviepy"]], "moviepy": [[13, 0, 0, "-", "Clip"], [15, 0, 0, "-", "Effect"], [16, 0, 0, "-", "audio"], [44, 0, 0, "-", "config"], [47, 0, 0, "-", "decorators"], [60, 0, 0, "-", "tools"], [68, 0, 0, "-", "video"]], "moviepy.Clip": [[14, 1, 1, "", "Clip"]], "moviepy.Clip.Clip": [[14, 2, 1, "", "close"], [14, 2, 1, "", "copy"], [14, 3, 1, "", "duration"], [14, 3, 1, "", "end"], [14, 2, 1, "", "get_frame"], [14, 2, 1, "", "is_playing"], [14, 2, 1, "", "iter_frames"], [14, 3, 1, "", "start"], [14, 2, 1, "", "time_transform"], [14, 2, 1, "", "transform"], [14, 2, 1, "", "with_cutout"], [14, 2, 1, "", "with_duration"], [14, 2, 1, "", "with_effects"], [14, 2, 1, "", "with_end"], [14, 2, 1, "", "with_fps"], [14, 2, 1, "", "with_is_mask"], [14, 2, 1, "", "with_make_frame"], [14, 2, 1, "", "with_memoize"], [14, 2, 1, "", "with_multiply_speed"], [14, 2, 1, "", "with_multiply_volume"], [14, 2, 1, "", "with_start"], [14, 2, 1, "", "with_subclip"]], "moviepy.Effect": [[15, 1, 1, "", "Effect"]], "moviepy.Effect.Effect": [[15, 2, 1, "", "apply"], [15, 2, 1, "", "copy"]], "moviepy.audio": [[17, 0, 0, "-", "AudioClip"], [22, 0, 0, "-", "fx"], [30, 0, 0, "-", "io"], [41, 0, 0, "-", "tools"]], "moviepy.audio.AudioClip": [[18, 1, 1, "", "AudioArrayClip"], [19, 1, 1, "", "AudioClip"], [20, 1, 1, "", "CompositeAudioClip"], [21, 5, 1, "", "concatenate_audioclips"]], "moviepy.audio.AudioClip.AudioClip": [[19, 2, 1, "", "audiopreview"], [19, 2, 1, "", "display_in_notebook"], [19, 2, 1, "", "iter_chunks"], [19, 2, 1, "", "max_volume"], [19, 2, 1, "", "to_soundarray"], [19, 2, 1, "", "write_audiofile"]], "moviepy.audio.AudioClip.CompositeAudioClip": [[20, 4, 1, "", "ends"], [20, 2, 1, "", "make_frame"], [20, 4, 1, "", "starts"]], "moviepy.audio.fx": [[23, 0, 0, "-", "AudioDelay"], [24, 0, 0, "-", "AudioFadeIn"], [25, 0, 0, "-", "AudioFadeOut"], [26, 0, 0, "-", "AudioLoop"], [27, 0, 0, "-", "AudioNormalize"], [28, 0, 0, "-", "MultiplyStereoVolume"], [29, 0, 0, "-", "MultiplyVolume"]], "moviepy.audio.fx.AudioDelay": [[23, 1, 1, "", "AudioDelay"]], "moviepy.audio.fx.AudioDelay.AudioDelay": [[23, 2, 1, "", "apply"], [23, 2, 1, "", "copy"]], "moviepy.audio.fx.AudioFadeIn": [[24, 1, 1, "", "AudioFadeIn"]], "moviepy.audio.fx.AudioFadeIn.AudioFadeIn": [[24, 2, 1, "", "apply"], [24, 2, 1, "", "copy"]], "moviepy.audio.fx.AudioFadeOut": [[25, 1, 1, "", "AudioFadeOut"]], "moviepy.audio.fx.AudioFadeOut.AudioFadeOut": [[25, 2, 1, "", "apply"], [25, 2, 1, "", "copy"]], "moviepy.audio.fx.AudioLoop": [[26, 1, 1, "", "AudioLoop"]], "moviepy.audio.fx.AudioLoop.AudioLoop": [[26, 2, 1, "", "apply"], [26, 2, 1, "", "copy"]], "moviepy.audio.fx.AudioNormalize": [[27, 1, 1, "", "AudioNormalize"]], "moviepy.audio.fx.AudioNormalize.AudioNormalize": [[27, 2, 1, "", "apply"], [27, 2, 1, "", "copy"]], "moviepy.audio.fx.MultiplyStereoVolume": [[28, 1, 1, "", "MultiplyStereoVolume"]], "moviepy.audio.fx.MultiplyStereoVolume.MultiplyStereoVolume": [[28, 2, 1, "", "apply"], [28, 2, 1, "", "copy"]], "moviepy.audio.fx.MultiplyVolume": [[29, 1, 1, "", "MultiplyVolume"]], "moviepy.audio.fx.MultiplyVolume.MultiplyVolume": [[29, 2, 1, "", "apply"], [29, 2, 1, "", "copy"]], "moviepy.audio.io": [[31, 0, 0, "-", "AudioFileClip"], [33, 0, 0, "-", "ffmpeg_audiowriter"], [36, 0, 0, "-", "ffplay_audiopreviewer"], [39, 0, 0, "-", "readers"]], 
"moviepy.audio.io.AudioFileClip": [[32, 1, 1, "", "AudioFileClip"]], "moviepy.audio.io.AudioFileClip.AudioFileClip": [[32, 3, 1, "", "Lifetime"], [32, 3, 1, "", "buffersize"], [32, 2, 1, "", "close"], [32, 3, 1, "", "fps"], [32, 3, 1, "", "nbytes"]], "moviepy.audio.io.ffmpeg_audiowriter": [[34, 1, 1, "", "FFMPEG_AudioWriter"], [35, 5, 1, "", "ffmpeg_audiowrite"]], "moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter": [[34, 2, 1, "", "close"], [34, 2, 1, "", "write_frames"]], "moviepy.audio.io.ffplay_audiopreviewer": [[37, 1, 1, "", "FFPLAY_AudioPreviewer"], [38, 5, 1, "", "ffplay_audiopreview"]], "moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer": [[37, 2, 1, "", "close"], [37, 2, 1, "", "write_frames"]], "moviepy.audio.io.readers": [[40, 1, 1, "", "FFMPEG_AudioReader"]], "moviepy.audio.io.readers.FFMPEG_AudioReader": [[40, 2, 1, "", "buffer_around"], [40, 2, 1, "", "close"], [40, 2, 1, "", "get_frame"], [40, 2, 1, "", "initialize"], [40, 2, 1, "", "read_chunk"], [40, 2, 1, "", "seek"], [40, 2, 1, "", "skip_chunk"]], "moviepy.audio.tools": [[42, 0, 0, "-", "cuts"]], "moviepy.audio.tools.cuts": [[43, 5, 1, "", "find_audio_period"]], "moviepy.config": [[45, 5, 1, "", "check"], [46, 5, 1, "", "try_cmd"]], "moviepy.decorators": [[48, 5, 1, "", "add_mask_if_none"], [49, 5, 1, "", "apply_to_audio"], [50, 5, 1, "", "apply_to_mask"], [51, 5, 1, "", "audio_video_effect"], [52, 5, 1, "", "convert_masks_to_RGB"], [53, 5, 1, "", "convert_parameter_to_seconds"], [54, 5, 1, "", "convert_path_to_string"], [55, 5, 1, "", "outplace"], [56, 5, 1, "", "preprocess_args"], [57, 5, 1, "", "requires_duration"], [58, 5, 1, "", "requires_fps"], [59, 5, 1, "", "use_clip_fps_by_default"]], "moviepy.tools": [[61, 5, 1, "", "close_all_clips"], [62, 5, 1, "", "convert_to_seconds"], [63, 5, 1, "", "cross_platform_popen_params"], [64, 5, 1, "", "deprecated_version_of"], [65, 5, 1, "", "find_extension"], [66, 5, 1, "", "no_display_available"], [67, 5, 1, "", "subprocess_call"]], "moviepy.video": [[69, 0, 0, "-", "VideoClip"], [77, 0, 0, "-", "compositing"], [82, 0, 0, "-", "fx"], [117, 0, 0, "-", "io"], [146, 0, 0, "-", "tools"]], "moviepy.video.VideoClip": [[70, 1, 1, "", "BitmapClip"], [71, 1, 1, "", "ColorClip"], [72, 1, 1, "", "DataVideoClip"], [73, 1, 1, "", "ImageClip"], [74, 1, 1, "", "TextClip"], [75, 1, 1, "", "UpdatedVideoClip"], [76, 1, 1, "", "VideoClip"]], "moviepy.video.VideoClip.BitmapClip": [[70, 2, 1, "", "to_bitmap"]], "moviepy.video.VideoClip.ImageClip": [[73, 2, 1, "", "image_transform"], [73, 3, 1, "", "img"], [73, 2, 1, "", "time_transform"], [73, 2, 1, "", "transform"]], "moviepy.video.VideoClip.VideoClip": [[76, 4, 1, "", "aspect_ratio"], [76, 3, 1, "", "audio"], [76, 2, 1, "", "blit_on"], [76, 2, 1, "", "copy"], [76, 2, 1, "", "cropped"], [76, 2, 1, "", "display_in_notebook"], [76, 2, 1, "", "fill_array"], [76, 4, 1, "", "h"], [76, 2, 1, "", "image_transform"], [76, 3, 1, "", "is_mask"], [76, 3, 1, "", "layer"], [76, 3, 1, "", "make_frame"], [76, 3, 1, "", "mask"], [76, 4, 1, "", "n_frames"], [76, 3, 1, "", "pos"], [76, 2, 1, "", "preview"], [76, 3, 1, "", "relative_pos"], [76, 2, 1, "", "resized"], [76, 2, 1, "", "rotated"], [76, 2, 1, "", "save_frame"], [76, 2, 1, "", "show"], [76, 3, 1, "", "size"], [76, 2, 1, "", "to_ImageClip"], [76, 2, 1, "", "to_RGB"], [76, 2, 1, "", "to_mask"], [76, 4, 1, "", "w"], [76, 2, 1, "", "with_add_mask"], [76, 2, 1, "", "with_audio"], [76, 2, 1, "", "with_layer"], [76, 2, 1, "", "with_make_frame"], [76, 2, 1, "", "with_mask"], [76, 2, 1, "", 
"with_on_color"], [76, 2, 1, "", "with_opacity"], [76, 2, 1, "", "with_position"], [76, 2, 1, "", "with_sub_effects"], [76, 2, 1, "", "without_audio"], [76, 2, 1, "", "write_gif"], [76, 2, 1, "", "write_images_sequence"], [76, 2, 1, "", "write_videofile"]], "moviepy.video.compositing": [[78, 0, 0, "-", "CompositeVideoClip"]], "moviepy.video.compositing.CompositeVideoClip": [[79, 1, 1, "", "CompositeVideoClip"], [80, 5, 1, "", "clips_array"], [81, 5, 1, "", "concatenate_videoclips"]], "moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip": [[79, 2, 1, "", "close"], [79, 2, 1, "", "make_frame"], [79, 2, 1, "", "playing_clips"]], "moviepy.video.fx": [[83, 0, 0, "-", "AccelDecel"], [84, 0, 0, "-", "BlackAndWhite"], [85, 0, 0, "-", "Blink"], [86, 0, 0, "-", "Crop"], [87, 0, 0, "-", "CrossFadeIn"], [88, 0, 0, "-", "CrossFadeOut"], [89, 0, 0, "-", "EvenSize"], [90, 0, 0, "-", "FadeIn"], [91, 0, 0, "-", "FadeOut"], [92, 0, 0, "-", "Freeze"], [93, 0, 0, "-", "FreezeRegion"], [94, 0, 0, "-", "GammaCorrection"], [95, 0, 0, "-", "HeadBlur"], [96, 0, 0, "-", "InvertColors"], [97, 0, 0, "-", "Loop"], [98, 0, 0, "-", "LumContrast"], [99, 0, 0, "-", "MakeLoopable"], [100, 0, 0, "-", "Margin"], [101, 0, 0, "-", "MaskColor"], [102, 0, 0, "-", "MasksAnd"], [103, 0, 0, "-", "MasksOr"], [104, 0, 0, "-", "MirrorX"], [105, 0, 0, "-", "MirrorY"], [106, 0, 0, "-", "MultiplyColor"], [107, 0, 0, "-", "MultiplySpeed"], [108, 0, 0, "-", "Painting"], [109, 0, 0, "-", "Resize"], [110, 0, 0, "-", "Rotate"], [111, 0, 0, "-", "Scroll"], [112, 0, 0, "-", "SlideIn"], [113, 0, 0, "-", "SlideOut"], [114, 0, 0, "-", "SuperSample"], [115, 0, 0, "-", "TimeMirror"], [116, 0, 0, "-", "TimeSymmetrize"]], "moviepy.video.fx.AccelDecel": [[83, 1, 1, "", "AccelDecel"]], "moviepy.video.fx.AccelDecel.AccelDecel": [[83, 2, 1, "", "apply"], [83, 2, 1, "", "copy"]], "moviepy.video.fx.BlackAndWhite": [[84, 1, 1, "", "BlackAndWhite"]], "moviepy.video.fx.BlackAndWhite.BlackAndWhite": [[84, 2, 1, "", "apply"], [84, 2, 1, "", "copy"]], "moviepy.video.fx.Blink": [[85, 1, 1, "", "Blink"]], "moviepy.video.fx.Blink.Blink": [[85, 2, 1, "", "apply"], [85, 2, 1, "", "copy"]], "moviepy.video.fx.Crop": [[86, 1, 1, "", "Crop"]], "moviepy.video.fx.Crop.Crop": [[86, 2, 1, "", "apply"], [86, 2, 1, "", "copy"]], "moviepy.video.fx.CrossFadeIn": [[87, 1, 1, "", "CrossFadeIn"]], "moviepy.video.fx.CrossFadeIn.CrossFadeIn": [[87, 2, 1, "", "apply"], [87, 2, 1, "", "copy"]], "moviepy.video.fx.CrossFadeOut": [[88, 1, 1, "", "CrossFadeOut"]], "moviepy.video.fx.CrossFadeOut.CrossFadeOut": [[88, 2, 1, "", "apply"], [88, 2, 1, "", "copy"]], "moviepy.video.fx.EvenSize": [[89, 1, 1, "", "EvenSize"]], "moviepy.video.fx.EvenSize.EvenSize": [[89, 2, 1, "", "apply"], [89, 2, 1, "", "copy"]], "moviepy.video.fx.FadeIn": [[90, 1, 1, "", "FadeIn"]], "moviepy.video.fx.FadeIn.FadeIn": [[90, 2, 1, "", "apply"], [90, 2, 1, "", "copy"]], "moviepy.video.fx.FadeOut": [[91, 1, 1, "", "FadeOut"]], "moviepy.video.fx.FadeOut.FadeOut": [[91, 2, 1, "", "apply"], [91, 2, 1, "", "copy"]], "moviepy.video.fx.Freeze": [[92, 1, 1, "", "Freeze"]], "moviepy.video.fx.Freeze.Freeze": [[92, 2, 1, "", "apply"], [92, 2, 1, "", "copy"]], "moviepy.video.fx.FreezeRegion": [[93, 1, 1, "", "FreezeRegion"]], "moviepy.video.fx.FreezeRegion.FreezeRegion": [[93, 2, 1, "", "apply"], [93, 2, 1, "", "copy"]], "moviepy.video.fx.GammaCorrection": [[94, 1, 1, "", "GammaCorrection"]], "moviepy.video.fx.GammaCorrection.GammaCorrection": [[94, 2, 1, "", "apply"], [94, 2, 1, "", "copy"]], 
"moviepy.video.fx.HeadBlur": [[95, 1, 1, "", "HeadBlur"]], "moviepy.video.fx.HeadBlur.HeadBlur": [[95, 2, 1, "", "apply"], [95, 2, 1, "", "copy"]], "moviepy.video.fx.InvertColors": [[96, 1, 1, "", "InvertColors"]], "moviepy.video.fx.InvertColors.InvertColors": [[96, 2, 1, "", "apply"], [96, 2, 1, "", "copy"]], "moviepy.video.fx.Loop": [[97, 1, 1, "", "Loop"]], "moviepy.video.fx.Loop.Loop": [[97, 2, 1, "", "apply"], [97, 2, 1, "", "copy"]], "moviepy.video.fx.LumContrast": [[98, 1, 1, "", "LumContrast"]], "moviepy.video.fx.LumContrast.LumContrast": [[98, 2, 1, "", "apply"], [98, 2, 1, "", "copy"]], "moviepy.video.fx.MakeLoopable": [[99, 1, 1, "", "MakeLoopable"]], "moviepy.video.fx.MakeLoopable.MakeLoopable": [[99, 2, 1, "", "apply"], [99, 2, 1, "", "copy"]], "moviepy.video.fx.Margin": [[100, 1, 1, "", "Margin"]], "moviepy.video.fx.Margin.Margin": [[100, 2, 1, "", "add_margin"], [100, 2, 1, "", "apply"], [100, 2, 1, "", "copy"]], "moviepy.video.fx.MaskColor": [[101, 1, 1, "", "MaskColor"]], "moviepy.video.fx.MaskColor.MaskColor": [[101, 2, 1, "", "apply"], [101, 2, 1, "", "copy"]], "moviepy.video.fx.MasksAnd": [[102, 1, 1, "", "MasksAnd"]], "moviepy.video.fx.MasksAnd.MasksAnd": [[102, 2, 1, "", "apply"], [102, 2, 1, "", "copy"]], "moviepy.video.fx.MasksOr": [[103, 1, 1, "", "MasksOr"]], "moviepy.video.fx.MasksOr.MasksOr": [[103, 2, 1, "", "apply"], [103, 2, 1, "", "copy"]], "moviepy.video.fx.MirrorX": [[104, 1, 1, "", "MirrorX"]], "moviepy.video.fx.MirrorX.MirrorX": [[104, 2, 1, "", "apply"], [104, 2, 1, "", "copy"]], "moviepy.video.fx.MirrorY": [[105, 1, 1, "", "MirrorY"]], "moviepy.video.fx.MirrorY.MirrorY": [[105, 2, 1, "", "apply"], [105, 2, 1, "", "copy"]], "moviepy.video.fx.MultiplyColor": [[106, 1, 1, "", "MultiplyColor"]], "moviepy.video.fx.MultiplyColor.MultiplyColor": [[106, 2, 1, "", "apply"], [106, 2, 1, "", "copy"]], "moviepy.video.fx.MultiplySpeed": [[107, 1, 1, "", "MultiplySpeed"]], "moviepy.video.fx.MultiplySpeed.MultiplySpeed": [[107, 2, 1, "", "apply"], [107, 2, 1, "", "copy"]], "moviepy.video.fx.Painting": [[108, 1, 1, "", "Painting"]], "moviepy.video.fx.Painting.Painting": [[108, 2, 1, "", "apply"], [108, 2, 1, "", "copy"], [108, 2, 1, "", "to_painting"]], "moviepy.video.fx.Resize": [[109, 1, 1, "", "Resize"]], "moviepy.video.fx.Resize.Resize": [[109, 2, 1, "", "apply"], [109, 2, 1, "", "copy"], [109, 2, 1, "", "resizer"]], "moviepy.video.fx.Rotate": [[110, 1, 1, "", "Rotate"]], "moviepy.video.fx.Rotate.Rotate": [[110, 2, 1, "", "apply"], [110, 2, 1, "", "copy"]], "moviepy.video.fx.Scroll": [[111, 1, 1, "", "Scroll"]], "moviepy.video.fx.Scroll.Scroll": [[111, 2, 1, "", "apply"], [111, 2, 1, "", "copy"]], "moviepy.video.fx.SlideIn": [[112, 1, 1, "", "SlideIn"]], "moviepy.video.fx.SlideIn.SlideIn": [[112, 2, 1, "", "apply"], [112, 2, 1, "", "copy"]], "moviepy.video.fx.SlideOut": [[113, 1, 1, "", "SlideOut"]], "moviepy.video.fx.SlideOut.SlideOut": [[113, 2, 1, "", "apply"], [113, 2, 1, "", "copy"]], "moviepy.video.fx.SuperSample": [[114, 1, 1, "", "SuperSample"]], "moviepy.video.fx.SuperSample.SuperSample": [[114, 2, 1, "", "apply"], [114, 2, 1, "", "copy"]], "moviepy.video.fx.TimeMirror": [[115, 1, 1, "", "TimeMirror"]], "moviepy.video.fx.TimeMirror.TimeMirror": [[115, 2, 1, "", "apply"], [115, 2, 1, "", "copy"]], "moviepy.video.fx.TimeSymmetrize": [[116, 1, 1, "", "TimeSymmetrize"]], "moviepy.video.fx.TimeSymmetrize.TimeSymmetrize": [[116, 2, 1, "", "apply"], [116, 2, 1, "", "copy"]], "moviepy.video.io": [[118, 0, 0, "-", "ImageSequenceClip"], [120, 0, 0, "-", 
"VideoFileClip"], [122, 0, 0, "-", "display_in_notebook"], [126, 0, 0, "-", "ffmpeg_reader"], [131, 0, 0, "-", "ffmpeg_tools"], [137, 0, 0, "-", "ffmpeg_writer"], [141, 0, 0, "-", "ffplay_previewer"], [144, 0, 0, "-", "gif_writers"]], "moviepy.video.io.ImageSequenceClip": [[119, 1, 1, "", "ImageSequenceClip"]], "moviepy.video.io.VideoFileClip": [[121, 1, 1, "", "VideoFileClip"]], "moviepy.video.io.VideoFileClip.VideoFileClip": [[121, 2, 1, "", "close"], [121, 3, 1, "", "filename"], [121, 3, 1, "", "fps"]], "moviepy.video.io.display_in_notebook": [[123, 5, 1, "", "HTML2"], [124, 5, 1, "", "display_in_notebook"], [125, 5, 1, "", "html_embed"]], "moviepy.video.io.ffmpeg_reader": [[127, 1, 1, "", "FFMPEG_VideoReader"], [128, 1, 1, "", "FFmpegInfosParser"], [129, 5, 1, "", "ffmpeg_parse_infos"], [130, 5, 1, "", "ffmpeg_read_image"]], "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader": [[127, 2, 1, "", "close"], [127, 2, 1, "", "get_frame"], [127, 2, 1, "", "get_frame_number"], [127, 2, 1, "", "initialize"], [127, 2, 1, "", "read_frame"], [127, 2, 1, "", "skip_frames"]], "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser": [[128, 2, 1, "", "parse"], [128, 2, 1, "", "parse_audio_stream_data"], [128, 2, 1, "", "parse_data_by_stream_type"], [128, 2, 1, "", "parse_duration"], [128, 2, 1, "", "parse_fps"], [128, 2, 1, "", "parse_metadata_field_value"], [128, 2, 1, "", "parse_tbr"], [128, 2, 1, "", "parse_video_stream_data"], [128, 2, 1, "", "video_metadata_type_casting"]], "moviepy.video.io.ffmpeg_tools": [[132, 5, 1, "", "ffmpeg_extract_audio"], [133, 5, 1, "", "ffmpeg_extract_subclip"], [134, 5, 1, "", "ffmpeg_merge_video_audio"], [135, 5, 1, "", "ffmpeg_resize"], [136, 5, 1, "", "ffmpeg_stabilize_video"]], "moviepy.video.io.ffmpeg_writer": [[138, 1, 1, "", "FFMPEG_VideoWriter"], [139, 5, 1, "", "ffmpeg_write_image"], [140, 5, 1, "", "ffmpeg_write_video"]], "moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter": [[138, 2, 1, "", "close"], [138, 2, 1, "", "write_frame"]], "moviepy.video.io.ffplay_previewer": [[142, 1, 1, "", "FFPLAY_VideoPreviewer"], [143, 5, 1, "", "ffplay_preview_video"]], "moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer": [[142, 2, 1, "", "close"], [142, 2, 1, "", "show_frame"]], "moviepy.video.io.gif_writers": [[145, 5, 1, "", "write_gif_with_imageio"]], "moviepy.video.tools": [[147, 0, 0, "-", "credits"], [149, 0, 0, "-", "cuts"], [154, 0, 0, "-", "drawing"], [159, 0, 0, "-", "interpolators"], [162, 0, 0, "-", "subtitles"]], "moviepy.video.tools.credits": [[148, 1, 1, "", "CreditsClip"]], "moviepy.video.tools.cuts": [[150, 1, 1, "", "FramesMatch"], [151, 1, 1, "", "FramesMatches"], [152, 5, 1, "", "detect_scenes"], [153, 5, 1, "", "find_video_period"]], "moviepy.video.tools.cuts.FramesMatches": [[151, 2, 1, "", "best"], [151, 2, 1, "", "filter"], [151, 2, 1, "", "from_clip"], [151, 2, 1, "", "load"], [151, 2, 1, "", "save"], [151, 2, 1, "", "select_scenes"], [151, 2, 1, "", "write_gifs"]], "moviepy.video.tools.drawing": [[155, 5, 1, "", "blit"], [156, 5, 1, "", "circle"], [157, 5, 1, "", "color_gradient"], [158, 5, 1, "", "color_split"]], "moviepy.video.tools.interpolators": [[160, 1, 1, "", "Interpolator"], [161, 1, 1, "", "Trajectory"]], "moviepy.video.tools.interpolators.Trajectory": [[161, 2, 1, "", "addx"], [161, 2, 1, "", "addy"], [161, 2, 1, "", "from_file"], [161, 2, 1, "", "load_list"], [161, 2, 1, "", "save_list"], [161, 2, 1, "", "to_file"], [161, 2, 1, "", "txy"], [161, 2, 1, "", "update_interpolators"]], "moviepy.video.tools.subtitles": [[163, 1, 1, "", 
"SubtitlesClip"], [164, 5, 1, "", "file_to_subtitles"]], "moviepy.video.tools.subtitles.SubtitlesClip": [[163, 2, 1, "", "in_subclip"], [163, 2, 1, "", "match_expr"], [163, 2, 1, "", "write_srt"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:method", "3": "py:attribute", "4": "py:property", "5": "py:function"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "attribute", "Python attribute"], "4": ["py", "property", "Python property"], "5": ["py", "function", "Python function"]}, "titleterms": {"moviepi": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 167, 169], "": 0, "contribut": [0, 10], "guidelin": 0, "commun": 0, "github": 0, "prepar": 0, "develop": [0, 1, 2], "code": [0, 8], "convent": 0, "qualiti": 0, "standard": 0, "workflow": 0, "local": 0, "submit": 0, "pull": 0, "request": 0, "instal": [1, 6], "librari": 1, "document": [1, 10], "test": 1, "lint": 1, "The": [2, 8, 167], "guid": [2, 167], "faq": 3, "troubleshoot": 3, "common": 3, "error": 3, "ar": [3, 169], "bug": 3, "gener": 3, "video": [3, 7, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 168, 170], "cannot": 3, "read": 3, "my": 3, "favorit": 3, "player": 3, "i": [3, 8], "can": 3, "t": 3, "seem": 3, "ani": 3, "preview": [3, 7, 170], "make": 3, "them": 3, "slower": 3, "than": 3, "thei": 3, "docker": 4, "prerequisit": [4, 7], "build": 4, "how": [4, 7, 8, 169], "run": 4, "unittest": 4, "from": [4, 7, 9], "your": [4, 166, 170], "own": [4, 166], "script": 4, "get": 5, "start": [5, 165], "addit": 6, "binari": 6, "defin": 6, "custom": 6, "path": 6, "environ": [6, 9], "variabl": 6, "verifi": 6, "find": 6, "10": 7, "minut": 7, "creat": [7, 166], "trailer": 7, "big": 7, "buck": 7, "bunni": 7, "step": 7, "1": 7, "import": [7, 9], "load": [7, 168], "2": [7, 9], "extract": 7, "best": 7, "scene": 7, "3": 7, "take": 7, "first": 7, "look": 7, "4": 7, "modifi": [7, 169], "clip": [7, 8, 9, 13, 14, 165, 168, 169, 170], "cut": [7, 42, 43, 149, 150, 151, 152, 153], "out": 7, "part": 7, "It": 7, "5": 7, "text": 7, "logo": 7, "6": 7, "time": [7, 165, 169], "7": 7, "see": 7, "all": [7, 9, 170], "combin": 7, "8": 7, "posit": [7, 165], "our": 7, "9": 7, "ad": [7, 165], "transit": [7, 165], "effect": [7, 9, 15, 165, 166, 169], "appear": 7, "us": [7, 168, 169], "filter": [7, 169], "11": 7, "render": 7, "final": 7, "file": [7, 170], "conclus": 7, "quick": 8, "present": 8, "do": 8, "need": 8, 
"advantag": 8, "limit": 8, "exampl": 8, "work": 8, "central": 8, "concept": 8, "updat": 9, "v1": 9, "x": 9, "v2": 9, "drop": 9, "support": 9, "python": 9, "editor": 9, "supress": 9, "simplifi": 9, "renam": 9, "api": [9, 11], "unif": 9, "massiv": 9, "refactor": 9, "move": 9, "function": 9, "class": 9, "fx": [9, 22, 23, 24, 25, 26, 27, 28, 29, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116], "with_effect": 9, "remov": 9, "method": [9, 169], "mani": 9, "extern": 9, "depend": 9, "unifi": 9, "featur": 9, "misclean": 9, "signatur": 9, "chang": [9, 165], "why": [9, 166], "those": 9, "0": 9, "refer": 11, "audio": [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 165, 168], "audioclip": [17, 18, 19, 20, 21, 168], "audioarrayclip": [18, 168], "compositeaudioclip": 20, "concatenate_audioclip": 21, "audiodelai": 23, "audiofadein": 24, "audiofadeout": 25, "audioloop": 26, "audionorm": 27, "multiplystereovolum": 28, "multiplyvolum": 29, "io": [30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145], "audiofileclip": [31, 32, 168], "ffmpeg_audiowrit": [33, 34, 35], "ffplay_audiopreview": [36, 37, 38], "reader": [39, 40], "ffmpeg_audioread": 40, "tool": [41, 42, 43, 60, 61, 62, 63, 64, 65, 66, 67, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164], "find_audio_period": 43, "config": [44, 45, 46], "check": 45, "try_cmd": 46, "decor": [47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59], "add_mask_if_non": 48, "apply_to_audio": 49, "apply_to_mask": 50, "audio_video_effect": 51, "convert_masks_to_rgb": 52, "convert_parameter_to_second": 53, "convert_path_to_str": 54, "outplac": 55, "preprocess_arg": 56, "requires_dur": 57, "requires_fp": 58, "use_clip_fps_by_default": 59, "close_all_clip": 61, "convert_to_second": 62, "cross_platform_popen_param": 63, "deprecated_version_of": 64, "find_extens": 65, "no_display_avail": 66, "subprocess_cal": 67, "videoclip": [69, 70, 71, 72, 73, 74, 75, 76, 168], "bitmapclip": 70, "colorclip": [71, 168], "datavideoclip": [72, 168], "imageclip": [73, 168], "textclip": [74, 168], "updatedvideoclip": [75, 168], "composit": [77, 78, 79, 80, 81, 165], "compositevideoclip": [78, 79, 80, 81], "clips_arrai": 80, "concatenate_videoclip": 81, "acceldecel": 83, "blackandwhit": 84, "blink": 85, "crop": 86, "crossfadein": 87, "crossfadeout": 88, "evens": 89, "fadein": 90, "fadeout": 91, "freez": 92, "freezeregion": 93, "gammacorrect": 94, "headblur": 95, "invertcolor": 96, "loop": 97, "lumcontrast": 98, "makeloop": 99, "margin": 100, "maskcolor": 101, "masksand": 102, "masksor": 103, "mirrorx": 104, "mirrori": 105, "multiplycolor": 106, "multiplyspe": 107, "paint": 108, "resiz": 109, "rotat": 110, "scroll": 111, "slidein": 112, "slideout": 113, "supersampl": 114, "timemirror": 115, "timesymmetr": 116, "imagesequenceclip": [118, 119, 168], "videofileclip": [120, 121, 168], "lifetim": 121, "display_in_notebook": [122, 123, 124, 125], "html2": 123, "html_emb": 125, "ffmpeg_read": [126, 127, 128, 129, 130], "ffmpeg_videoread": 127, "ffmpeginfospars": 128, "ffmpeg_parse_info": 129, "ffmpeg_read_imag": 130, "ffmpeg_tool": [131, 132, 133, 134, 135, 136], "ffmpeg_extract_audio": 132, "ffmpeg_extract_subclip": 133, "ffmpeg_merge_video_audio": 134, 
"ffmpeg_res": 135, "ffmpeg_stabilize_video": 136, "ffmpeg_writ": [137, 138, 139, 140], "ffmpeg_videowrit": 138, "ffmpeg_write_imag": 139, "ffmpeg_write_video": 140, "ffplay_preview": [141, 142, 143], "ffplay_videopreview": 142, "ffplay_preview_video": 143, "gif_writ": [144, 145], "write_gif_with_imageio": 145, "credit": [147, 148], "creditsclip": 148, "framesmatch": [150, 151], "detect_scen": 152, "find_video_period": 153, "draw": [154, 155, 156, 157, 158], "blit": 155, "circl": 156, "color_gradi": 157, "color_split": 158, "interpol": [159, 160, 161], "trajectori": 161, "subtitl": [162, 163, 164], "subtitlesclip": 163, "file_to_subtitl": 164, "multipl": 165, "juxtapos": 165, "concaten": 165, "more": 165, "complex": 165, "stop": 165, "an": 166, "user": 167, "resourc": 168, "realas": 168, "close": 168, "categori": 168, "anim": [168, 170], "unanim": 168, "mask": 168, "element": 168, "appli": 169, "modif": 169, "copi": 169, "dure": 169, "memori": 169, "consumpt": 169, "represent": 169, "with_": 169, "appar": 169, "onli": 169, "both": 169, "save": 170, "just": 170, "one": 170, "frame": 170, "show": 170, "jupyt": 170, "notebook": 170, "mp4": 170, "webm": 170, "ogv": 170, "export": 170, "singl": 170, "gif": 170, "imag": 170, "directori": 170}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1, "sphinx": 57}, "alltitles": {"MoviePy\u2019s Contribution Guidelines": [[0, "moviepy-s-contribution-guidelines"]], "Communication on GitHub": [[0, "communication-on-github"]], "Preparing for development": [[0, "preparing-for-development"]], "Coding conventions, code quality": [[0, "coding-conventions-code-quality"]], "Standard contribution workflow": [[0, "standard-contribution-workflow"]], "Local development": [[0, "local-development"]], "Submitting Pull Requests": [[0, "submitting-pull-requests"]], "Installation for MoviePy developers": [[1, "installation-for-moviepy-developers"]], "Libraries for documentation": [[1, "libraries-for-documentation"]], "Libraries for testing and linting": [[1, "libraries-for-testing-and-linting"]], "The MoviePy Developers Guide": [[2, "the-moviepy-developers-guide"]], "FAQ and troubleshooting": [[3, "faq-and-troubleshooting"]], "Common errors that are not bugs": [[3, "common-errors-that-are-not-bugs"]], "MoviePy generated a video that cannot be read by my favorite player.": [[3, "moviepy-generated-a-video-that-cannot-be-read-by-my-favorite-player"]], "I can\u2019t seem to read any video with MoviePy": [[3, "i-can-t-seem-to-read-any-video-with-moviepy"]], "Previewing videos make them slower than they are": [[3, "previewing-videos-make-them-slower-than-they-are"]], "MoviePy Docker": [[4, "moviepy-docker"]], "Prerequisites": [[4, "prerequisites"], [7, "prerequisites"]], "Build the docker": [[4, "build-the-docker"]], "How to run the unittests from docker": [[4, "how-to-run-the-unittests-from-docker"]], "Running your own moviepy script from docker": [[4, "running-your-own-moviepy-script-from-docker"]], "Getting started with MoviePy": [[5, "getting-started-with-moviepy"]], "Installation": [[6, "installation"]], "Installation of additional binaries": [[6, "installation-of-additional-binaries"]], "Define custom paths to binaries": [[6, "define-custom-paths-to-binaries"]], "Environment variables": [[6, 
"environment-variables"]], "Verify if MoviePy find binaries": [[6, "verify-if-moviepy-find-binaries"]], "MoviePy in 10 Minutes: Creating a Trailer from \u201cBig Buck Bunny\u201d": [[7, "moviepy-in-10-minutes-creating-a-trailer-from-big-buck-bunny"]], "Step 1: Import MoviePy and Load the Video": [[7, "step-1-import-moviepy-and-load-the-video"]], "Step 2: Extract the Best Scenes": [[7, "step-2-extract-the-best-scenes"]], "Step 3: Take a First Look with Preview": [[7, "step-3-take-a-first-look-with-preview"]], "Step 4: Modify a Clip by Cutting Out a Part of It": [[7, "step-4-modify-a-clip-by-cutting-out-a-part-of-it"]], "Step 5: Creating Text/Logo Clips": [[7, "step-5-creating-text-logo-clips"]], "Step 6: Timing the clips": [[7, "step-6-timing-the-clips"]], "Step 7: Seeing how all clips combine": [[7, "step-7-seeing-how-all-clips-combine"]], "Step 8: Positioning our clips": [[7, "step-8-positioning-our-clips"]], "Step 9: Adding transitions and effects": [[7, "step-9-adding-transitions-and-effects"]], "Step 10: Modifying the appearance of a clip using filters": [[7, "step-10-modifying-the-appearance-of-a-clip-using-filters"]], "Step 11: Rendering the final clip to a file": [[7, "step-11-rendering-the-final-clip-to-a-file"]], "Conclusion": [[7, "conclusion"]], "Quick presentation": [[8, "quick-presentation"]], "Do I need MoviePy?": [[8, "do-i-need-moviepy"]], "Advantages and limitations": [[8, "advantages-and-limitations"]], "Example code": [[8, "example-code"]], "How MoviePy works": [[8, "how-moviepy-works"]], "The central concept, the clips": [[8, "the-central-concept-the-clips"]], "Updating from v1.X to v2.X": [[9, "updating-from-v1-x-to-v2-x"]], "Dropping support of Python 2": [[9, "dropping-support-of-python-2"]], "moviepy.editor supression and simplified importation": [[9, "moviepy-editor-supression-and-simplified-importation"]], "Renaming and API unification": [[9, "renaming-and-api-unification"]], "Massive refactoring of effects": [[9, "massive-refactoring-of-effects"]], "Moving effects from function to classes": [[9, "moving-effects-from-function-to-classes"]], "Moving from clip.fx to with_effects()": [[9, "moving-from-clip-fx-to-with-effects"]], "Removing effects as clip methods": [[9, "removing-effects-as-clip-methods"]], "Dropping many external dependencies and unifying environment": [[9, "dropping-many-external-dependencies-and-unifying-environment"]], "Removed features": [[9, "removed-features"]], "Miscleanous signature changes": [[9, "miscleanous-signature-changes"]], "Why all thoses changes and updating from v1.0 to v2.0?": [[9, "why-all-thoses-changes-and-updating-from-v1-0-to-v2-0"]], "MoviePy documentation": [[10, "moviepy-documentation"]], "Contribute!": [[10, "contribute"]], "Api Reference": [[11, "api-reference"]], "moviepy": [[12, "module-moviepy"]], "moviepy.Clip": [[13, "module-moviepy.Clip"]], "moviepy.Clip.Clip": [[14, "moviepy-clip-clip"]], "moviepy.Effect": [[15, "module-moviepy.Effect"]], "moviepy.audio": [[16, "module-moviepy.audio"]], "moviepy.audio.AudioClip": [[17, "module-moviepy.audio.AudioClip"]], "moviepy.audio.AudioClip.AudioArrayClip": [[18, "moviepy-audio-audioclip-audioarrayclip"]], "moviepy.audio.AudioClip.AudioClip": [[19, "moviepy-audio-audioclip-audioclip"]], "moviepy.audio.AudioClip.CompositeAudioClip": [[20, "moviepy-audio-audioclip-compositeaudioclip"]], "moviepy.audio.AudioClip.concatenate_audioclips": [[21, "moviepy-audio-audioclip-concatenate-audioclips"]], "moviepy.audio.fx": [[22, "module-moviepy.audio.fx"]], "moviepy.audio.fx.AudioDelay": 
[[23, "module-moviepy.audio.fx.AudioDelay"]], "moviepy.audio.fx.AudioFadeIn": [[24, "module-moviepy.audio.fx.AudioFadeIn"]], "moviepy.audio.fx.AudioFadeOut": [[25, "module-moviepy.audio.fx.AudioFadeOut"]], "moviepy.audio.fx.AudioLoop": [[26, "module-moviepy.audio.fx.AudioLoop"]], "moviepy.audio.fx.AudioNormalize": [[27, "module-moviepy.audio.fx.AudioNormalize"]], "moviepy.audio.fx.MultiplyStereoVolume": [[28, "module-moviepy.audio.fx.MultiplyStereoVolume"]], "moviepy.audio.fx.MultiplyVolume": [[29, "module-moviepy.audio.fx.MultiplyVolume"]], "moviepy.audio.io": [[30, "module-moviepy.audio.io"]], "moviepy.audio.io.AudioFileClip": [[31, "module-moviepy.audio.io.AudioFileClip"]], "moviepy.audio.io.AudioFileClip.AudioFileClip": [[32, "moviepy-audio-io-audiofileclip-audiofileclip"]], "moviepy.audio.io.ffmpeg_audiowriter": [[33, "module-moviepy.audio.io.ffmpeg_audiowriter"]], "moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter": [[34, "moviepy-audio-io-ffmpeg-audiowriter-ffmpeg-audiowriter"]], "moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite": [[35, "moviepy-audio-io-ffmpeg-audiowriter-ffmpeg-audiowrite"]], "moviepy.audio.io.ffplay_audiopreviewer": [[36, "module-moviepy.audio.io.ffplay_audiopreviewer"]], "moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer": [[37, "moviepy-audio-io-ffplay-audiopreviewer-ffplay-audiopreviewer"]], "moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview": [[38, "moviepy-audio-io-ffplay-audiopreviewer-ffplay-audiopreview"]], "moviepy.audio.io.readers": [[39, "module-moviepy.audio.io.readers"]], "moviepy.audio.io.readers.FFMPEG_AudioReader": [[40, "moviepy-audio-io-readers-ffmpeg-audioreader"]], "moviepy.audio.tools": [[41, "module-moviepy.audio.tools"]], "moviepy.audio.tools.cuts": [[42, "module-moviepy.audio.tools.cuts"]], "moviepy.audio.tools.cuts.find_audio_period": [[43, "moviepy-audio-tools-cuts-find-audio-period"]], "moviepy.config": [[44, "module-moviepy.config"]], "moviepy.config.check": [[45, "moviepy-config-check"]], "moviepy.config.try_cmd": [[46, "moviepy-config-try-cmd"]], "moviepy.decorators": [[47, "module-moviepy.decorators"]], "moviepy.decorators.add_mask_if_none": [[48, "moviepy-decorators-add-mask-if-none"]], "moviepy.decorators.apply_to_audio": [[49, "moviepy-decorators-apply-to-audio"]], "moviepy.decorators.apply_to_mask": [[50, "moviepy-decorators-apply-to-mask"]], "moviepy.decorators.audio_video_effect": [[51, "moviepy-decorators-audio-video-effect"]], "moviepy.decorators.convert_masks_to_RGB": [[52, "moviepy-decorators-convert-masks-to-rgb"]], "moviepy.decorators.convert_parameter_to_seconds": [[53, "moviepy-decorators-convert-parameter-to-seconds"]], "moviepy.decorators.convert_path_to_string": [[54, "moviepy-decorators-convert-path-to-string"]], "moviepy.decorators.outplace": [[55, "moviepy-decorators-outplace"]], "moviepy.decorators.preprocess_args": [[56, "moviepy-decorators-preprocess-args"]], "moviepy.decorators.requires_duration": [[57, "moviepy-decorators-requires-duration"]], "moviepy.decorators.requires_fps": [[58, "moviepy-decorators-requires-fps"]], "moviepy.decorators.use_clip_fps_by_default": [[59, "moviepy-decorators-use-clip-fps-by-default"]], "moviepy.tools": [[60, "module-moviepy.tools"]], "moviepy.tools.close_all_clips": [[61, "moviepy-tools-close-all-clips"]], "moviepy.tools.convert_to_seconds": [[62, "moviepy-tools-convert-to-seconds"]], "moviepy.tools.cross_platform_popen_params": [[63, "moviepy-tools-cross-platform-popen-params"]], "moviepy.tools.deprecated_version_of": [[64, 
"moviepy-tools-deprecated-version-of"]], "moviepy.tools.find_extension": [[65, "moviepy-tools-find-extension"]], "moviepy.tools.no_display_available": [[66, "moviepy-tools-no-display-available"]], "moviepy.tools.subprocess_call": [[67, "moviepy-tools-subprocess-call"]], "moviepy.video": [[68, "module-moviepy.video"]], "moviepy.video.VideoClip": [[69, "module-moviepy.video.VideoClip"]], "moviepy.video.VideoClip.BitmapClip": [[70, "moviepy-video-videoclip-bitmapclip"]], "moviepy.video.VideoClip.ColorClip": [[71, "moviepy-video-videoclip-colorclip"]], "moviepy.video.VideoClip.DataVideoClip": [[72, "moviepy-video-videoclip-datavideoclip"]], "moviepy.video.VideoClip.ImageClip": [[73, "moviepy-video-videoclip-imageclip"]], "moviepy.video.VideoClip.TextClip": [[74, "moviepy-video-videoclip-textclip"]], "moviepy.video.VideoClip.UpdatedVideoClip": [[75, "moviepy-video-videoclip-updatedvideoclip"]], "moviepy.video.VideoClip.VideoClip": [[76, "moviepy-video-videoclip-videoclip"]], "moviepy.video.compositing": [[77, "module-moviepy.video.compositing"]], "moviepy.video.compositing.CompositeVideoClip": [[78, "module-moviepy.video.compositing.CompositeVideoClip"]], "moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip": [[79, "moviepy-video-compositing-compositevideoclip-compositevideoclip"]], "moviepy.video.compositing.CompositeVideoClip.clips_array": [[80, "moviepy-video-compositing-compositevideoclip-clips-array"]], "moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips": [[81, "moviepy-video-compositing-compositevideoclip-concatenate-videoclips"]], "moviepy.video.fx": [[82, "module-moviepy.video.fx"]], "moviepy.video.fx.AccelDecel": [[83, "module-moviepy.video.fx.AccelDecel"]], "moviepy.video.fx.BlackAndWhite": [[84, "module-moviepy.video.fx.BlackAndWhite"]], "moviepy.video.fx.Blink": [[85, "module-moviepy.video.fx.Blink"]], "moviepy.video.fx.Crop": [[86, "module-moviepy.video.fx.Crop"]], "moviepy.video.fx.CrossFadeIn": [[87, "module-moviepy.video.fx.CrossFadeIn"]], "moviepy.video.fx.CrossFadeOut": [[88, "module-moviepy.video.fx.CrossFadeOut"]], "moviepy.video.fx.EvenSize": [[89, "module-moviepy.video.fx.EvenSize"]], "moviepy.video.fx.FadeIn": [[90, "module-moviepy.video.fx.FadeIn"]], "moviepy.video.fx.FadeOut": [[91, "module-moviepy.video.fx.FadeOut"]], "moviepy.video.fx.Freeze": [[92, "module-moviepy.video.fx.Freeze"]], "moviepy.video.fx.FreezeRegion": [[93, "module-moviepy.video.fx.FreezeRegion"]], "moviepy.video.fx.GammaCorrection": [[94, "module-moviepy.video.fx.GammaCorrection"]], "moviepy.video.fx.HeadBlur": [[95, "module-moviepy.video.fx.HeadBlur"]], "moviepy.video.fx.InvertColors": [[96, "module-moviepy.video.fx.InvertColors"]], "moviepy.video.fx.Loop": [[97, "module-moviepy.video.fx.Loop"]], "moviepy.video.fx.LumContrast": [[98, "module-moviepy.video.fx.LumContrast"]], "moviepy.video.fx.MakeLoopable": [[99, "module-moviepy.video.fx.MakeLoopable"]], "moviepy.video.fx.Margin": [[100, "module-moviepy.video.fx.Margin"]], "moviepy.video.fx.MaskColor": [[101, "module-moviepy.video.fx.MaskColor"]], "moviepy.video.fx.MasksAnd": [[102, "module-moviepy.video.fx.MasksAnd"]], "moviepy.video.fx.MasksOr": [[103, "module-moviepy.video.fx.MasksOr"]], "moviepy.video.fx.MirrorX": [[104, "module-moviepy.video.fx.MirrorX"]], "moviepy.video.fx.MirrorY": [[105, "module-moviepy.video.fx.MirrorY"]], "moviepy.video.fx.MultiplyColor": [[106, "module-moviepy.video.fx.MultiplyColor"]], "moviepy.video.fx.MultiplySpeed": [[107, "module-moviepy.video.fx.MultiplySpeed"]], 
"moviepy.video.fx.Painting": [[108, "module-moviepy.video.fx.Painting"]], "moviepy.video.fx.Resize": [[109, "module-moviepy.video.fx.Resize"]], "moviepy.video.fx.Rotate": [[110, "module-moviepy.video.fx.Rotate"]], "moviepy.video.fx.Scroll": [[111, "module-moviepy.video.fx.Scroll"]], "moviepy.video.fx.SlideIn": [[112, "module-moviepy.video.fx.SlideIn"]], "moviepy.video.fx.SlideOut": [[113, "module-moviepy.video.fx.SlideOut"]], "moviepy.video.fx.SuperSample": [[114, "module-moviepy.video.fx.SuperSample"]], "moviepy.video.fx.TimeMirror": [[115, "module-moviepy.video.fx.TimeMirror"]], "moviepy.video.fx.TimeSymmetrize": [[116, "module-moviepy.video.fx.TimeSymmetrize"]], "moviepy.video.io": [[117, "module-moviepy.video.io"]], "moviepy.video.io.ImageSequenceClip": [[118, "module-moviepy.video.io.ImageSequenceClip"]], "moviepy.video.io.ImageSequenceClip.ImageSequenceClip": [[119, "moviepy-video-io-imagesequenceclip-imagesequenceclip"]], "moviepy.video.io.VideoFileClip": [[120, "module-moviepy.video.io.VideoFileClip"]], "moviepy.video.io.VideoFileClip.VideoFileClip": [[121, "moviepy-video-io-videofileclip-videofileclip"]], "Lifetime": [[121, "lifetime"]], "moviepy.video.io.display_in_notebook": [[122, "module-moviepy.video.io.display_in_notebook"]], "moviepy.video.io.display_in_notebook.HTML2": [[123, "moviepy-video-io-display-in-notebook-html2"]], "moviepy.video.io.display_in_notebook.display_in_notebook": [[124, "moviepy-video-io-display-in-notebook-display-in-notebook"]], "moviepy.video.io.display_in_notebook.html_embed": [[125, "moviepy-video-io-display-in-notebook-html-embed"]], "moviepy.video.io.ffmpeg_reader": [[126, "module-moviepy.video.io.ffmpeg_reader"]], "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader": [[127, "moviepy-video-io-ffmpeg-reader-ffmpeg-videoreader"]], "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser": [[128, "moviepy-video-io-ffmpeg-reader-ffmpeginfosparser"]], "moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos": [[129, "moviepy-video-io-ffmpeg-reader-ffmpeg-parse-infos"]], "moviepy.video.io.ffmpeg_reader.ffmpeg_read_image": [[130, "moviepy-video-io-ffmpeg-reader-ffmpeg-read-image"]], "moviepy.video.io.ffmpeg_tools": [[131, "module-moviepy.video.io.ffmpeg_tools"]], "moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio": [[132, "moviepy-video-io-ffmpeg-tools-ffmpeg-extract-audio"]], "moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip": [[133, "moviepy-video-io-ffmpeg-tools-ffmpeg-extract-subclip"]], "moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio": [[134, "moviepy-video-io-ffmpeg-tools-ffmpeg-merge-video-audio"]], "moviepy.video.io.ffmpeg_tools.ffmpeg_resize": [[135, "moviepy-video-io-ffmpeg-tools-ffmpeg-resize"]], "moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video": [[136, "moviepy-video-io-ffmpeg-tools-ffmpeg-stabilize-video"]], "moviepy.video.io.ffmpeg_writer": [[137, "module-moviepy.video.io.ffmpeg_writer"]], "moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter": [[138, "moviepy-video-io-ffmpeg-writer-ffmpeg-videowriter"]], "moviepy.video.io.ffmpeg_writer.ffmpeg_write_image": [[139, "moviepy-video-io-ffmpeg-writer-ffmpeg-write-image"]], "moviepy.video.io.ffmpeg_writer.ffmpeg_write_video": [[140, "moviepy-video-io-ffmpeg-writer-ffmpeg-write-video"]], "moviepy.video.io.ffplay_previewer": [[141, "module-moviepy.video.io.ffplay_previewer"]], "moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer": [[142, "moviepy-video-io-ffplay-previewer-ffplay-videopreviewer"]], "moviepy.video.io.ffplay_previewer.ffplay_preview_video": [[143, 
"moviepy-video-io-ffplay-previewer-ffplay-preview-video"]], "moviepy.video.io.gif_writers": [[144, "module-moviepy.video.io.gif_writers"]], "moviepy.video.io.gif_writers.write_gif_with_imageio": [[145, "moviepy-video-io-gif-writers-write-gif-with-imageio"]], "moviepy.video.tools": [[146, "module-moviepy.video.tools"]], "moviepy.video.tools.credits": [[147, "module-moviepy.video.tools.credits"]], "moviepy.video.tools.credits.CreditsClip": [[148, "moviepy-video-tools-credits-creditsclip"]], "moviepy.video.tools.cuts": [[149, "module-moviepy.video.tools.cuts"]], "moviepy.video.tools.cuts.FramesMatch": [[150, "moviepy-video-tools-cuts-framesmatch"]], "moviepy.video.tools.cuts.FramesMatches": [[151, "moviepy-video-tools-cuts-framesmatches"]], "moviepy.video.tools.cuts.detect_scenes": [[152, "moviepy-video-tools-cuts-detect-scenes"]], "moviepy.video.tools.cuts.find_video_period": [[153, "moviepy-video-tools-cuts-find-video-period"]], "moviepy.video.tools.drawing": [[154, "module-moviepy.video.tools.drawing"]], "moviepy.video.tools.drawing.blit": [[155, "moviepy-video-tools-drawing-blit"]], "moviepy.video.tools.drawing.circle": [[156, "moviepy-video-tools-drawing-circle"]], "moviepy.video.tools.drawing.color_gradient": [[157, "moviepy-video-tools-drawing-color-gradient"]], "moviepy.video.tools.drawing.color_split": [[158, "moviepy-video-tools-drawing-color-split"]], "moviepy.video.tools.interpolators": [[159, "module-moviepy.video.tools.interpolators"]], "moviepy.video.tools.interpolators.Interpolator": [[160, "moviepy-video-tools-interpolators-interpolator"]], "moviepy.video.tools.interpolators.Trajectory": [[161, "moviepy-video-tools-interpolators-trajectory"]], "moviepy.video.tools.subtitles": [[162, "module-moviepy.video.tools.subtitles"]], "moviepy.video.tools.subtitles.SubtitlesClip": [[163, "moviepy-video-tools-subtitles-subtitlesclip"]], "moviepy.video.tools.subtitles.file_to_subtitles": [[164, "moviepy-video-tools-subtitles-file-to-subtitles"]], "Compositing multiple clips": [[165, "compositing-multiple-clips"]], "Juxtaposing and concatenating clips": [[165, "juxtaposing-and-concatenating-clips"]], "Concatenating multiple clips": [[165, "concatenating-multiple-clips"]], "Juxtaposing multiple clips": [[165, "juxtaposing-multiple-clips"]], "More complex video compositing": [[165, "more-complex-video-compositing"]], "Changing starting and stopping times of clips": [[165, "changing-starting-and-stopping-times-of-clips"]], "Positioning clips": [[165, "positioning-clips"]], "Adding transitions effects": [[165, "adding-transitions-effects"]], "Compositing audio clips": [[165, "compositing-audio-clips"]], "Creating your own effects": [[166, "creating-your-own-effects"]], "Why creating your own effects?": [[166, "why-creating-your-own-effects"]], "Creating an effect": [[166, "creating-an-effect"]], "The MoviePy User Guide": [[167, "the-moviepy-user-guide"]], "Loading resources as clips": [[168, "loading-resources-as-clips"]], "Realasing resources by closing a clip": [[168, "realasing-resources-by-closing-a-clip"]], "Categories of video clips": [[168, "categories-of-video-clips"]], "Animated clips": [[168, "animated-clips"]], "VideoClip": [[168, "videoclip"]], "VideoFileClip": [[168, "videofileclip"]], "ImageSequenceClip": [[168, "imagesequenceclip"]], "DataVideoClip": [[168, "datavideoclip"]], "UpdatedVideoClip": [[168, "updatedvideoclip"]], "Unanimated clips": [[168, "unanimated-clips"]], "ImageClip": [[168, "imageclip"]], "TextClip": [[168, "textclip"]], "ColorClip": [[168, "colorclip"]], "Mask 
clips": [[168, "mask-clips"]], "Using audio elements with audio clips": [[168, "using-audio-elements-with-audio-clips"]], "AudioClip": [[168, "audioclip"]], "AudioFileClip": [[168, "audiofileclip"]], "AudioArrayClip": [[168, "audioarrayclip"]], "Modifying clips and apply effects": [[169, "modifying-clips-and-apply-effects"]], "How modifications are applied to a clip ?": [[169, "how-modifications-are-applied-to-a-clip"]], "Clip copy during modification": [[169, "clip-copy-during-modification"]], "Memory consumption of effect and modifications": [[169, "memory-consumption-of-effect-and-modifications"]], "Time representations in MoviePy": [[169, "time-representations-in-moviepy"]], "Modify a clip using the with_* methods": [[169, "modify-a-clip-using-the-with-methods"]], "Modify a clip using effects": [[169, "modify-a-clip-using-effects"]], "Modify a clip apparence and timing using filters": [[169, "modify-a-clip-apparence-and-timing-using-filters"]], "Modify only the timing of a Clip": [[169, "modify-only-the-timing-of-a-clip"]], "Modifying only the apparence of a Clip": [[169, "modifying-only-the-apparence-of-a-clip"]], "Modifying both the apparence and the timing of a Clip": [[169, "modifying-both-the-apparence-and-the-timing-of-a-clip"]], "Previewing and saving video clips": [[170, "previewing-and-saving-video-clips"]], "Previewing a clip": [[170, "previewing-a-clip"]], "Preview a clip as a video": [[170, "preview-a-clip-as-a-video"]], "Preview just one frame of a clip": [[170, "preview-just-one-frame-of-a-clip"]], "Showing a clip in Jupyter Notebook": [[170, "showing-a-clip-in-jupyter-notebook"]], "Save your clip into a file": [[170, "save-your-clip-into-a-file"]], "Video files (.mp4, .webm, .ogv\u2026)": [[170, "video-files-mp4-webm-ogv"]], "Export a single frame of the clip": [[170, "export-a-single-frame-of-the-clip"]], "Animated GIFs": [[170, "animated-gifs"]], "Export all the clip as images in a directory": [[170, "export-all-the-clip-as-images-in-a-directory"]]}, "indexentries": {"module": [[12, "module-moviepy"], [13, "module-moviepy.Clip"], [15, "module-moviepy.Effect"], [16, "module-moviepy.audio"], [17, "module-moviepy.audio.AudioClip"], [22, "module-moviepy.audio.fx"], [23, "module-moviepy.audio.fx.AudioDelay"], [24, "module-moviepy.audio.fx.AudioFadeIn"], [25, "module-moviepy.audio.fx.AudioFadeOut"], [26, "module-moviepy.audio.fx.AudioLoop"], [27, "module-moviepy.audio.fx.AudioNormalize"], [28, "module-moviepy.audio.fx.MultiplyStereoVolume"], [29, "module-moviepy.audio.fx.MultiplyVolume"], [30, "module-moviepy.audio.io"], [31, "module-moviepy.audio.io.AudioFileClip"], [33, "module-moviepy.audio.io.ffmpeg_audiowriter"], [36, "module-moviepy.audio.io.ffplay_audiopreviewer"], [39, "module-moviepy.audio.io.readers"], [41, "module-moviepy.audio.tools"], [42, "module-moviepy.audio.tools.cuts"], [44, "module-moviepy.config"], [47, "module-moviepy.decorators"], [60, "module-moviepy.tools"], [68, "module-moviepy.video"], [69, "module-moviepy.video.VideoClip"], [77, "module-moviepy.video.compositing"], [78, "module-moviepy.video.compositing.CompositeVideoClip"], [82, "module-moviepy.video.fx"], [83, "module-moviepy.video.fx.AccelDecel"], [84, "module-moviepy.video.fx.BlackAndWhite"], [85, "module-moviepy.video.fx.Blink"], [86, "module-moviepy.video.fx.Crop"], [87, "module-moviepy.video.fx.CrossFadeIn"], [88, "module-moviepy.video.fx.CrossFadeOut"], [89, "module-moviepy.video.fx.EvenSize"], [90, "module-moviepy.video.fx.FadeIn"], [91, "module-moviepy.video.fx.FadeOut"], [92, 
"module-moviepy.video.fx.Freeze"], [93, "module-moviepy.video.fx.FreezeRegion"], [94, "module-moviepy.video.fx.GammaCorrection"], [95, "module-moviepy.video.fx.HeadBlur"], [96, "module-moviepy.video.fx.InvertColors"], [97, "module-moviepy.video.fx.Loop"], [98, "module-moviepy.video.fx.LumContrast"], [99, "module-moviepy.video.fx.MakeLoopable"], [100, "module-moviepy.video.fx.Margin"], [101, "module-moviepy.video.fx.MaskColor"], [102, "module-moviepy.video.fx.MasksAnd"], [103, "module-moviepy.video.fx.MasksOr"], [104, "module-moviepy.video.fx.MirrorX"], [105, "module-moviepy.video.fx.MirrorY"], [106, "module-moviepy.video.fx.MultiplyColor"], [107, "module-moviepy.video.fx.MultiplySpeed"], [108, "module-moviepy.video.fx.Painting"], [109, "module-moviepy.video.fx.Resize"], [110, "module-moviepy.video.fx.Rotate"], [111, "module-moviepy.video.fx.Scroll"], [112, "module-moviepy.video.fx.SlideIn"], [113, "module-moviepy.video.fx.SlideOut"], [114, "module-moviepy.video.fx.SuperSample"], [115, "module-moviepy.video.fx.TimeMirror"], [116, "module-moviepy.video.fx.TimeSymmetrize"], [117, "module-moviepy.video.io"], [118, "module-moviepy.video.io.ImageSequenceClip"], [120, "module-moviepy.video.io.VideoFileClip"], [122, "module-moviepy.video.io.display_in_notebook"], [126, "module-moviepy.video.io.ffmpeg_reader"], [131, "module-moviepy.video.io.ffmpeg_tools"], [137, "module-moviepy.video.io.ffmpeg_writer"], [141, "module-moviepy.video.io.ffplay_previewer"], [144, "module-moviepy.video.io.gif_writers"], [146, "module-moviepy.video.tools"], [147, "module-moviepy.video.tools.credits"], [149, "module-moviepy.video.tools.cuts"], [154, "module-moviepy.video.tools.drawing"], [159, "module-moviepy.video.tools.interpolators"], [162, "module-moviepy.video.tools.subtitles"]], "moviepy": [[12, "module-moviepy"]], "moviepy.clip": [[13, "module-moviepy.Clip"]], "clip (class in moviepy.clip)": [[14, "moviepy.Clip.Clip"]], "close() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.close"]], "copy() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.copy"]], "duration (moviepy.clip.clip attribute)": [[14, "moviepy.Clip.Clip.duration"]], "end (moviepy.clip.clip attribute)": [[14, "moviepy.Clip.Clip.end"]], "get_frame() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.get_frame"]], "is_playing() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.is_playing"]], "iter_frames() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.iter_frames"]], "start (moviepy.clip.clip attribute)": [[14, "moviepy.Clip.Clip.start"]], "time_transform() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.time_transform"]], "transform() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.transform"]], "with_cutout() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_cutout"]], "with_duration() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_duration"]], "with_effects() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_effects"]], "with_end() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_end"]], "with_fps() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_fps"]], "with_is_mask() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_is_mask"]], "with_make_frame() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_make_frame"]], "with_memoize() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_memoize"]], "with_multiply_speed() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_multiply_speed"]], "with_multiply_volume() (moviepy.clip.clip 
method)": [[14, "moviepy.Clip.Clip.with_multiply_volume"]], "with_start() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_start"]], "with_subclip() (moviepy.clip.clip method)": [[14, "moviepy.Clip.Clip.with_subclip"]], "effect (class in moviepy.effect)": [[15, "moviepy.Effect.Effect"]], "apply() (moviepy.effect.effect method)": [[15, "moviepy.Effect.Effect.apply"]], "copy() (moviepy.effect.effect method)": [[15, "moviepy.Effect.Effect.copy"]], "moviepy.effect": [[15, "module-moviepy.Effect"]], "moviepy.audio": [[16, "module-moviepy.audio"]], "moviepy.audio.audioclip": [[17, "module-moviepy.audio.AudioClip"]], "audioarrayclip (class in moviepy.audio.audioclip)": [[18, "moviepy.audio.AudioClip.AudioArrayClip"]], "audioclip (class in moviepy.audio.audioclip)": [[19, "moviepy.audio.AudioClip.AudioClip"]], "audiopreview() (moviepy.audio.audioclip.audioclip method)": [[19, "moviepy.audio.AudioClip.AudioClip.audiopreview"]], "display_in_notebook() (moviepy.audio.audioclip.audioclip method)": [[19, "moviepy.audio.AudioClip.AudioClip.display_in_notebook"]], "iter_chunks() (moviepy.audio.audioclip.audioclip method)": [[19, "moviepy.audio.AudioClip.AudioClip.iter_chunks"]], "max_volume() (moviepy.audio.audioclip.audioclip method)": [[19, "moviepy.audio.AudioClip.AudioClip.max_volume"]], "to_soundarray() (moviepy.audio.audioclip.audioclip method)": [[19, "moviepy.audio.AudioClip.AudioClip.to_soundarray"]], "write_audiofile() (moviepy.audio.audioclip.audioclip method)": [[19, "moviepy.audio.AudioClip.AudioClip.write_audiofile"]], "compositeaudioclip (class in moviepy.audio.audioclip)": [[20, "moviepy.audio.AudioClip.CompositeAudioClip"]], "ends (moviepy.audio.audioclip.compositeaudioclip property)": [[20, "moviepy.audio.AudioClip.CompositeAudioClip.ends"]], "make_frame() (moviepy.audio.audioclip.compositeaudioclip method)": [[20, "moviepy.audio.AudioClip.CompositeAudioClip.make_frame"]], "starts (moviepy.audio.audioclip.compositeaudioclip property)": [[20, "moviepy.audio.AudioClip.CompositeAudioClip.starts"]], "concatenate_audioclips() (in module moviepy.audio.audioclip)": [[21, "moviepy.audio.AudioClip.concatenate_audioclips"]], "moviepy.audio.fx": [[22, "module-moviepy.audio.fx"]], "audiodelay (class in moviepy.audio.fx.audiodelay)": [[23, "moviepy.audio.fx.AudioDelay.AudioDelay"]], "apply() (moviepy.audio.fx.audiodelay.audiodelay method)": [[23, "moviepy.audio.fx.AudioDelay.AudioDelay.apply"]], "copy() (moviepy.audio.fx.audiodelay.audiodelay method)": [[23, "moviepy.audio.fx.AudioDelay.AudioDelay.copy"]], "moviepy.audio.fx.audiodelay": [[23, "module-moviepy.audio.fx.AudioDelay"]], "audiofadein (class in moviepy.audio.fx.audiofadein)": [[24, "moviepy.audio.fx.AudioFadeIn.AudioFadeIn"]], "apply() (moviepy.audio.fx.audiofadein.audiofadein method)": [[24, "moviepy.audio.fx.AudioFadeIn.AudioFadeIn.apply"]], "copy() (moviepy.audio.fx.audiofadein.audiofadein method)": [[24, "moviepy.audio.fx.AudioFadeIn.AudioFadeIn.copy"]], "moviepy.audio.fx.audiofadein": [[24, "module-moviepy.audio.fx.AudioFadeIn"]], "audiofadeout (class in moviepy.audio.fx.audiofadeout)": [[25, "moviepy.audio.fx.AudioFadeOut.AudioFadeOut"]], "apply() (moviepy.audio.fx.audiofadeout.audiofadeout method)": [[25, "moviepy.audio.fx.AudioFadeOut.AudioFadeOut.apply"]], "copy() (moviepy.audio.fx.audiofadeout.audiofadeout method)": [[25, "moviepy.audio.fx.AudioFadeOut.AudioFadeOut.copy"]], "moviepy.audio.fx.audiofadeout": [[25, "module-moviepy.audio.fx.AudioFadeOut"]], "audioloop (class in moviepy.audio.fx.audioloop)": [[26, 
"moviepy.audio.fx.AudioLoop.AudioLoop"]], "apply() (moviepy.audio.fx.audioloop.audioloop method)": [[26, "moviepy.audio.fx.AudioLoop.AudioLoop.apply"]], "copy() (moviepy.audio.fx.audioloop.audioloop method)": [[26, "moviepy.audio.fx.AudioLoop.AudioLoop.copy"]], "moviepy.audio.fx.audioloop": [[26, "module-moviepy.audio.fx.AudioLoop"]], "audionormalize (class in moviepy.audio.fx.audionormalize)": [[27, "moviepy.audio.fx.AudioNormalize.AudioNormalize"]], "apply() (moviepy.audio.fx.audionormalize.audionormalize method)": [[27, "moviepy.audio.fx.AudioNormalize.AudioNormalize.apply"]], "copy() (moviepy.audio.fx.audionormalize.audionormalize method)": [[27, "moviepy.audio.fx.AudioNormalize.AudioNormalize.copy"]], "moviepy.audio.fx.audionormalize": [[27, "module-moviepy.audio.fx.AudioNormalize"]], "multiplystereovolume (class in moviepy.audio.fx.multiplystereovolume)": [[28, "moviepy.audio.fx.MultiplyStereoVolume.MultiplyStereoVolume"]], "apply() (moviepy.audio.fx.multiplystereovolume.multiplystereovolume method)": [[28, "moviepy.audio.fx.MultiplyStereoVolume.MultiplyStereoVolume.apply"]], "copy() (moviepy.audio.fx.multiplystereovolume.multiplystereovolume method)": [[28, "moviepy.audio.fx.MultiplyStereoVolume.MultiplyStereoVolume.copy"]], "moviepy.audio.fx.multiplystereovolume": [[28, "module-moviepy.audio.fx.MultiplyStereoVolume"]], "multiplyvolume (class in moviepy.audio.fx.multiplyvolume)": [[29, "moviepy.audio.fx.MultiplyVolume.MultiplyVolume"]], "apply() (moviepy.audio.fx.multiplyvolume.multiplyvolume method)": [[29, "moviepy.audio.fx.MultiplyVolume.MultiplyVolume.apply"]], "copy() (moviepy.audio.fx.multiplyvolume.multiplyvolume method)": [[29, "moviepy.audio.fx.MultiplyVolume.MultiplyVolume.copy"]], "moviepy.audio.fx.multiplyvolume": [[29, "module-moviepy.audio.fx.MultiplyVolume"]], "moviepy.audio.io": [[30, "module-moviepy.audio.io"]], "moviepy.audio.io.audiofileclip": [[31, "module-moviepy.audio.io.AudioFileClip"]], "audiofileclip (class in moviepy.audio.io.audiofileclip)": [[32, "moviepy.audio.io.AudioFileClip.AudioFileClip"]], "lifetime (moviepy.audio.io.audiofileclip.audiofileclip attribute)": [[32, "moviepy.audio.io.AudioFileClip.AudioFileClip.Lifetime"]], "buffersize (moviepy.audio.io.audiofileclip.audiofileclip attribute)": [[32, "moviepy.audio.io.AudioFileClip.AudioFileClip.buffersize"]], "close() (moviepy.audio.io.audiofileclip.audiofileclip method)": [[32, "moviepy.audio.io.AudioFileClip.AudioFileClip.close"]], "fps (moviepy.audio.io.audiofileclip.audiofileclip attribute)": [[32, "moviepy.audio.io.AudioFileClip.AudioFileClip.fps"]], "nbytes (moviepy.audio.io.audiofileclip.audiofileclip attribute)": [[32, "moviepy.audio.io.AudioFileClip.AudioFileClip.nbytes"]], "moviepy.audio.io.ffmpeg_audiowriter": [[33, "module-moviepy.audio.io.ffmpeg_audiowriter"]], "ffmpeg_audiowriter (class in moviepy.audio.io.ffmpeg_audiowriter)": [[34, "moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter"]], "close() (moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowriter method)": [[34, "moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter.close"]], "write_frames() (moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowriter method)": [[34, "moviepy.audio.io.ffmpeg_audiowriter.FFMPEG_AudioWriter.write_frames"]], "ffmpeg_audiowrite() (in module moviepy.audio.io.ffmpeg_audiowriter)": [[35, "moviepy.audio.io.ffmpeg_audiowriter.ffmpeg_audiowrite"]], "moviepy.audio.io.ffplay_audiopreviewer": [[36, "module-moviepy.audio.io.ffplay_audiopreviewer"]], "ffplay_audiopreviewer (class in 
moviepy.audio.io.ffplay_audiopreviewer)": [[37, "moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer"]], "close() (moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreviewer method)": [[37, "moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer.close"]], "write_frames() (moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreviewer method)": [[37, "moviepy.audio.io.ffplay_audiopreviewer.FFPLAY_AudioPreviewer.write_frames"]], "ffplay_audiopreview() (in module moviepy.audio.io.ffplay_audiopreviewer)": [[38, "moviepy.audio.io.ffplay_audiopreviewer.ffplay_audiopreview"]], "moviepy.audio.io.readers": [[39, "module-moviepy.audio.io.readers"]], "ffmpeg_audioreader (class in moviepy.audio.io.readers)": [[40, "moviepy.audio.io.readers.FFMPEG_AudioReader"]], "buffer_around() (moviepy.audio.io.readers.ffmpeg_audioreader method)": [[40, "moviepy.audio.io.readers.FFMPEG_AudioReader.buffer_around"]], "close() (moviepy.audio.io.readers.ffmpeg_audioreader method)": [[40, "moviepy.audio.io.readers.FFMPEG_AudioReader.close"]], "get_frame() (moviepy.audio.io.readers.ffmpeg_audioreader method)": [[40, "moviepy.audio.io.readers.FFMPEG_AudioReader.get_frame"]], "initialize() (moviepy.audio.io.readers.ffmpeg_audioreader method)": [[40, "moviepy.audio.io.readers.FFMPEG_AudioReader.initialize"]], "read_chunk() (moviepy.audio.io.readers.ffmpeg_audioreader method)": [[40, "moviepy.audio.io.readers.FFMPEG_AudioReader.read_chunk"]], "seek() (moviepy.audio.io.readers.ffmpeg_audioreader method)": [[40, "moviepy.audio.io.readers.FFMPEG_AudioReader.seek"]], "skip_chunk() (moviepy.audio.io.readers.ffmpeg_audioreader method)": [[40, "moviepy.audio.io.readers.FFMPEG_AudioReader.skip_chunk"]], "moviepy.audio.tools": [[41, "module-moviepy.audio.tools"]], "moviepy.audio.tools.cuts": [[42, "module-moviepy.audio.tools.cuts"]], "find_audio_period() (in module moviepy.audio.tools.cuts)": [[43, "moviepy.audio.tools.cuts.find_audio_period"]], "moviepy.config": [[44, "module-moviepy.config"]], "check() (in module moviepy.config)": [[45, "moviepy.config.check"]], "try_cmd() (in module moviepy.config)": [[46, "moviepy.config.try_cmd"]], "moviepy.decorators": [[47, "module-moviepy.decorators"]], "add_mask_if_none() (in module moviepy.decorators)": [[48, "moviepy.decorators.add_mask_if_none"]], "apply_to_audio() (in module moviepy.decorators)": [[49, "moviepy.decorators.apply_to_audio"]], "apply_to_mask() (in module moviepy.decorators)": [[50, "moviepy.decorators.apply_to_mask"]], "audio_video_effect() (in module moviepy.decorators)": [[51, "moviepy.decorators.audio_video_effect"]], "convert_masks_to_rgb() (in module moviepy.decorators)": [[52, "moviepy.decorators.convert_masks_to_RGB"]], "convert_parameter_to_seconds() (in module moviepy.decorators)": [[53, "moviepy.decorators.convert_parameter_to_seconds"]], "convert_path_to_string() (in module moviepy.decorators)": [[54, "moviepy.decorators.convert_path_to_string"]], "outplace() (in module moviepy.decorators)": [[55, "moviepy.decorators.outplace"]], "preprocess_args() (in module moviepy.decorators)": [[56, "moviepy.decorators.preprocess_args"]], "requires_duration() (in module moviepy.decorators)": [[57, "moviepy.decorators.requires_duration"]], "requires_fps() (in module moviepy.decorators)": [[58, "moviepy.decorators.requires_fps"]], "use_clip_fps_by_default() (in module moviepy.decorators)": [[59, "moviepy.decorators.use_clip_fps_by_default"]], "moviepy.tools": [[60, "module-moviepy.tools"]], "close_all_clips() (in module moviepy.tools)": [[61, 
"moviepy.tools.close_all_clips"]], "convert_to_seconds() (in module moviepy.tools)": [[62, "moviepy.tools.convert_to_seconds"]], "cross_platform_popen_params() (in module moviepy.tools)": [[63, "moviepy.tools.cross_platform_popen_params"]], "deprecated_version_of() (in module moviepy.tools)": [[64, "moviepy.tools.deprecated_version_of"]], "find_extension() (in module moviepy.tools)": [[65, "moviepy.tools.find_extension"]], "no_display_available() (in module moviepy.tools)": [[66, "moviepy.tools.no_display_available"]], "subprocess_call() (in module moviepy.tools)": [[67, "moviepy.tools.subprocess_call"]], "moviepy.video": [[68, "module-moviepy.video"]], "moviepy.video.videoclip": [[69, "module-moviepy.video.VideoClip"]], "bitmapclip (class in moviepy.video.videoclip)": [[70, "moviepy.video.VideoClip.BitmapClip"]], "to_bitmap() (moviepy.video.videoclip.bitmapclip method)": [[70, "moviepy.video.VideoClip.BitmapClip.to_bitmap"]], "colorclip (class in moviepy.video.videoclip)": [[71, "moviepy.video.VideoClip.ColorClip"]], "datavideoclip (class in moviepy.video.videoclip)": [[72, "moviepy.video.VideoClip.DataVideoClip"]], "imageclip (class in moviepy.video.videoclip)": [[73, "moviepy.video.VideoClip.ImageClip"]], "image_transform() (moviepy.video.videoclip.imageclip method)": [[73, "moviepy.video.VideoClip.ImageClip.image_transform"]], "img (moviepy.video.videoclip.imageclip attribute)": [[73, "moviepy.video.VideoClip.ImageClip.img"]], "time_transform() (moviepy.video.videoclip.imageclip method)": [[73, "moviepy.video.VideoClip.ImageClip.time_transform"]], "transform() (moviepy.video.videoclip.imageclip method)": [[73, "moviepy.video.VideoClip.ImageClip.transform"]], "textclip (class in moviepy.video.videoclip)": [[74, "moviepy.video.VideoClip.TextClip"]], "updatedvideoclip (class in moviepy.video.videoclip)": [[75, "moviepy.video.VideoClip.UpdatedVideoClip"]], "videoclip (class in moviepy.video.videoclip)": [[76, "moviepy.video.VideoClip.VideoClip"]], "aspect_ratio (moviepy.video.videoclip.videoclip property)": [[76, "moviepy.video.VideoClip.VideoClip.aspect_ratio"]], "audio (moviepy.video.videoclip.videoclip attribute)": [[76, "moviepy.video.VideoClip.VideoClip.audio"]], "blit_on() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.blit_on"]], "copy() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.copy"]], "cropped() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.cropped"]], "display_in_notebook() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.display_in_notebook"]], "fill_array() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.fill_array"]], "h (moviepy.video.videoclip.videoclip property)": [[76, "moviepy.video.VideoClip.VideoClip.h"]], "image_transform() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.image_transform"]], "is_mask (moviepy.video.videoclip.videoclip attribute)": [[76, "moviepy.video.VideoClip.VideoClip.is_mask"]], "layer (moviepy.video.videoclip.videoclip attribute)": [[76, "moviepy.video.VideoClip.VideoClip.layer"]], "make_frame (moviepy.video.videoclip.videoclip attribute)": [[76, "moviepy.video.VideoClip.VideoClip.make_frame"]], "mask (moviepy.video.videoclip.videoclip attribute)": [[76, "moviepy.video.VideoClip.VideoClip.mask"]], "n_frames (moviepy.video.videoclip.videoclip property)": [[76, "moviepy.video.VideoClip.VideoClip.n_frames"]], "pos 
(moviepy.video.videoclip.videoclip attribute)": [[76, "moviepy.video.VideoClip.VideoClip.pos"]], "preview() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.preview"]], "relative_pos (moviepy.video.videoclip.videoclip attribute)": [[76, "moviepy.video.VideoClip.VideoClip.relative_pos"]], "resized() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.resized"]], "rotated() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.rotated"]], "save_frame() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.save_frame"]], "show() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.show"]], "size (moviepy.video.videoclip.videoclip attribute)": [[76, "moviepy.video.VideoClip.VideoClip.size"]], "to_imageclip() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.to_ImageClip"]], "to_rgb() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.to_RGB"]], "to_mask() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.to_mask"]], "w (moviepy.video.videoclip.videoclip property)": [[76, "moviepy.video.VideoClip.VideoClip.w"]], "with_add_mask() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.with_add_mask"]], "with_audio() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.with_audio"]], "with_layer() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.with_layer"]], "with_make_frame() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.with_make_frame"]], "with_mask() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.with_mask"]], "with_on_color() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.with_on_color"]], "with_opacity() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.with_opacity"]], "with_position() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.with_position"]], "with_sub_effects() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.with_sub_effects"]], "without_audio() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.without_audio"]], "write_gif() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.write_gif"]], "write_images_sequence() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.write_images_sequence"]], "write_videofile() (moviepy.video.videoclip.videoclip method)": [[76, "moviepy.video.VideoClip.VideoClip.write_videofile"]], "moviepy.video.compositing": [[77, "module-moviepy.video.compositing"]], "moviepy.video.compositing.compositevideoclip": [[78, "module-moviepy.video.compositing.CompositeVideoClip"]], "compositevideoclip (class in moviepy.video.compositing.compositevideoclip)": [[79, "moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip"]], "close() (moviepy.video.compositing.compositevideoclip.compositevideoclip method)": [[79, "moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip.close"]], "make_frame() (moviepy.video.compositing.compositevideoclip.compositevideoclip method)": [[79, "moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip.make_frame"]], 
"playing_clips() (moviepy.video.compositing.compositevideoclip.compositevideoclip method)": [[79, "moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip.playing_clips"]], "clips_array() (in module moviepy.video.compositing.compositevideoclip)": [[80, "moviepy.video.compositing.CompositeVideoClip.clips_array"]], "concatenate_videoclips() (in module moviepy.video.compositing.compositevideoclip)": [[81, "moviepy.video.compositing.CompositeVideoClip.concatenate_videoclips"]], "moviepy.video.fx": [[82, "module-moviepy.video.fx"]], "acceldecel (class in moviepy.video.fx.acceldecel)": [[83, "moviepy.video.fx.AccelDecel.AccelDecel"]], "apply() (moviepy.video.fx.acceldecel.acceldecel method)": [[83, "moviepy.video.fx.AccelDecel.AccelDecel.apply"]], "copy() (moviepy.video.fx.acceldecel.acceldecel method)": [[83, "moviepy.video.fx.AccelDecel.AccelDecel.copy"]], "moviepy.video.fx.acceldecel": [[83, "module-moviepy.video.fx.AccelDecel"]], "blackandwhite (class in moviepy.video.fx.blackandwhite)": [[84, "moviepy.video.fx.BlackAndWhite.BlackAndWhite"]], "apply() (moviepy.video.fx.blackandwhite.blackandwhite method)": [[84, "moviepy.video.fx.BlackAndWhite.BlackAndWhite.apply"]], "copy() (moviepy.video.fx.blackandwhite.blackandwhite method)": [[84, "moviepy.video.fx.BlackAndWhite.BlackAndWhite.copy"]], "moviepy.video.fx.blackandwhite": [[84, "module-moviepy.video.fx.BlackAndWhite"]], "blink (class in moviepy.video.fx.blink)": [[85, "moviepy.video.fx.Blink.Blink"]], "apply() (moviepy.video.fx.blink.blink method)": [[85, "moviepy.video.fx.Blink.Blink.apply"]], "copy() (moviepy.video.fx.blink.blink method)": [[85, "moviepy.video.fx.Blink.Blink.copy"]], "moviepy.video.fx.blink": [[85, "module-moviepy.video.fx.Blink"]], "crop (class in moviepy.video.fx.crop)": [[86, "moviepy.video.fx.Crop.Crop"]], "apply() (moviepy.video.fx.crop.crop method)": [[86, "moviepy.video.fx.Crop.Crop.apply"]], "copy() (moviepy.video.fx.crop.crop method)": [[86, "moviepy.video.fx.Crop.Crop.copy"]], "moviepy.video.fx.crop": [[86, "module-moviepy.video.fx.Crop"]], "crossfadein (class in moviepy.video.fx.crossfadein)": [[87, "moviepy.video.fx.CrossFadeIn.CrossFadeIn"]], "apply() (moviepy.video.fx.crossfadein.crossfadein method)": [[87, "moviepy.video.fx.CrossFadeIn.CrossFadeIn.apply"]], "copy() (moviepy.video.fx.crossfadein.crossfadein method)": [[87, "moviepy.video.fx.CrossFadeIn.CrossFadeIn.copy"]], "moviepy.video.fx.crossfadein": [[87, "module-moviepy.video.fx.CrossFadeIn"]], "crossfadeout (class in moviepy.video.fx.crossfadeout)": [[88, "moviepy.video.fx.CrossFadeOut.CrossFadeOut"]], "apply() (moviepy.video.fx.crossfadeout.crossfadeout method)": [[88, "moviepy.video.fx.CrossFadeOut.CrossFadeOut.apply"]], "copy() (moviepy.video.fx.crossfadeout.crossfadeout method)": [[88, "moviepy.video.fx.CrossFadeOut.CrossFadeOut.copy"]], "moviepy.video.fx.crossfadeout": [[88, "module-moviepy.video.fx.CrossFadeOut"]], "evensize (class in moviepy.video.fx.evensize)": [[89, "moviepy.video.fx.EvenSize.EvenSize"]], "apply() (moviepy.video.fx.evensize.evensize method)": [[89, "moviepy.video.fx.EvenSize.EvenSize.apply"]], "copy() (moviepy.video.fx.evensize.evensize method)": [[89, "moviepy.video.fx.EvenSize.EvenSize.copy"]], "moviepy.video.fx.evensize": [[89, "module-moviepy.video.fx.EvenSize"]], "fadein (class in moviepy.video.fx.fadein)": [[90, "moviepy.video.fx.FadeIn.FadeIn"]], "apply() (moviepy.video.fx.fadein.fadein method)": [[90, "moviepy.video.fx.FadeIn.FadeIn.apply"]], "copy() (moviepy.video.fx.fadein.fadein method)": [[90, 
"moviepy.video.fx.FadeIn.FadeIn.copy"]], "moviepy.video.fx.fadein": [[90, "module-moviepy.video.fx.FadeIn"]], "fadeout (class in moviepy.video.fx.fadeout)": [[91, "moviepy.video.fx.FadeOut.FadeOut"]], "apply() (moviepy.video.fx.fadeout.fadeout method)": [[91, "moviepy.video.fx.FadeOut.FadeOut.apply"]], "copy() (moviepy.video.fx.fadeout.fadeout method)": [[91, "moviepy.video.fx.FadeOut.FadeOut.copy"]], "moviepy.video.fx.fadeout": [[91, "module-moviepy.video.fx.FadeOut"]], "freeze (class in moviepy.video.fx.freeze)": [[92, "moviepy.video.fx.Freeze.Freeze"]], "apply() (moviepy.video.fx.freeze.freeze method)": [[92, "moviepy.video.fx.Freeze.Freeze.apply"]], "copy() (moviepy.video.fx.freeze.freeze method)": [[92, "moviepy.video.fx.Freeze.Freeze.copy"]], "moviepy.video.fx.freeze": [[92, "module-moviepy.video.fx.Freeze"]], "freezeregion (class in moviepy.video.fx.freezeregion)": [[93, "moviepy.video.fx.FreezeRegion.FreezeRegion"]], "apply() (moviepy.video.fx.freezeregion.freezeregion method)": [[93, "moviepy.video.fx.FreezeRegion.FreezeRegion.apply"]], "copy() (moviepy.video.fx.freezeregion.freezeregion method)": [[93, "moviepy.video.fx.FreezeRegion.FreezeRegion.copy"]], "moviepy.video.fx.freezeregion": [[93, "module-moviepy.video.fx.FreezeRegion"]], "gammacorrection (class in moviepy.video.fx.gammacorrection)": [[94, "moviepy.video.fx.GammaCorrection.GammaCorrection"]], "apply() (moviepy.video.fx.gammacorrection.gammacorrection method)": [[94, "moviepy.video.fx.GammaCorrection.GammaCorrection.apply"]], "copy() (moviepy.video.fx.gammacorrection.gammacorrection method)": [[94, "moviepy.video.fx.GammaCorrection.GammaCorrection.copy"]], "moviepy.video.fx.gammacorrection": [[94, "module-moviepy.video.fx.GammaCorrection"]], "headblur (class in moviepy.video.fx.headblur)": [[95, "moviepy.video.fx.HeadBlur.HeadBlur"]], "apply() (moviepy.video.fx.headblur.headblur method)": [[95, "moviepy.video.fx.HeadBlur.HeadBlur.apply"]], "copy() (moviepy.video.fx.headblur.headblur method)": [[95, "moviepy.video.fx.HeadBlur.HeadBlur.copy"]], "moviepy.video.fx.headblur": [[95, "module-moviepy.video.fx.HeadBlur"]], "invertcolors (class in moviepy.video.fx.invertcolors)": [[96, "moviepy.video.fx.InvertColors.InvertColors"]], "apply() (moviepy.video.fx.invertcolors.invertcolors method)": [[96, "moviepy.video.fx.InvertColors.InvertColors.apply"]], "copy() (moviepy.video.fx.invertcolors.invertcolors method)": [[96, "moviepy.video.fx.InvertColors.InvertColors.copy"]], "moviepy.video.fx.invertcolors": [[96, "module-moviepy.video.fx.InvertColors"]], "loop (class in moviepy.video.fx.loop)": [[97, "moviepy.video.fx.Loop.Loop"]], "apply() (moviepy.video.fx.loop.loop method)": [[97, "moviepy.video.fx.Loop.Loop.apply"]], "copy() (moviepy.video.fx.loop.loop method)": [[97, "moviepy.video.fx.Loop.Loop.copy"]], "moviepy.video.fx.loop": [[97, "module-moviepy.video.fx.Loop"]], "lumcontrast (class in moviepy.video.fx.lumcontrast)": [[98, "moviepy.video.fx.LumContrast.LumContrast"]], "apply() (moviepy.video.fx.lumcontrast.lumcontrast method)": [[98, "moviepy.video.fx.LumContrast.LumContrast.apply"]], "copy() (moviepy.video.fx.lumcontrast.lumcontrast method)": [[98, "moviepy.video.fx.LumContrast.LumContrast.copy"]], "moviepy.video.fx.lumcontrast": [[98, "module-moviepy.video.fx.LumContrast"]], "makeloopable (class in moviepy.video.fx.makeloopable)": [[99, "moviepy.video.fx.MakeLoopable.MakeLoopable"]], "apply() (moviepy.video.fx.makeloopable.makeloopable method)": [[99, "moviepy.video.fx.MakeLoopable.MakeLoopable.apply"]], "copy() 
(moviepy.video.fx.makeloopable.makeloopable method)": [[99, "moviepy.video.fx.MakeLoopable.MakeLoopable.copy"]], "moviepy.video.fx.makeloopable": [[99, "module-moviepy.video.fx.MakeLoopable"]], "margin (class in moviepy.video.fx.margin)": [[100, "moviepy.video.fx.Margin.Margin"]], "add_margin() (moviepy.video.fx.margin.margin method)": [[100, "moviepy.video.fx.Margin.Margin.add_margin"]], "apply() (moviepy.video.fx.margin.margin method)": [[100, "moviepy.video.fx.Margin.Margin.apply"]], "copy() (moviepy.video.fx.margin.margin method)": [[100, "moviepy.video.fx.Margin.Margin.copy"]], "moviepy.video.fx.margin": [[100, "module-moviepy.video.fx.Margin"]], "maskcolor (class in moviepy.video.fx.maskcolor)": [[101, "moviepy.video.fx.MaskColor.MaskColor"]], "apply() (moviepy.video.fx.maskcolor.maskcolor method)": [[101, "moviepy.video.fx.MaskColor.MaskColor.apply"]], "copy() (moviepy.video.fx.maskcolor.maskcolor method)": [[101, "moviepy.video.fx.MaskColor.MaskColor.copy"]], "moviepy.video.fx.maskcolor": [[101, "module-moviepy.video.fx.MaskColor"]], "masksand (class in moviepy.video.fx.masksand)": [[102, "moviepy.video.fx.MasksAnd.MasksAnd"]], "apply() (moviepy.video.fx.masksand.masksand method)": [[102, "moviepy.video.fx.MasksAnd.MasksAnd.apply"]], "copy() (moviepy.video.fx.masksand.masksand method)": [[102, "moviepy.video.fx.MasksAnd.MasksAnd.copy"]], "moviepy.video.fx.masksand": [[102, "module-moviepy.video.fx.MasksAnd"]], "masksor (class in moviepy.video.fx.masksor)": [[103, "moviepy.video.fx.MasksOr.MasksOr"]], "apply() (moviepy.video.fx.masksor.masksor method)": [[103, "moviepy.video.fx.MasksOr.MasksOr.apply"]], "copy() (moviepy.video.fx.masksor.masksor method)": [[103, "moviepy.video.fx.MasksOr.MasksOr.copy"]], "moviepy.video.fx.masksor": [[103, "module-moviepy.video.fx.MasksOr"]], "mirrorx (class in moviepy.video.fx.mirrorx)": [[104, "moviepy.video.fx.MirrorX.MirrorX"]], "apply() (moviepy.video.fx.mirrorx.mirrorx method)": [[104, "moviepy.video.fx.MirrorX.MirrorX.apply"]], "copy() (moviepy.video.fx.mirrorx.mirrorx method)": [[104, "moviepy.video.fx.MirrorX.MirrorX.copy"]], "moviepy.video.fx.mirrorx": [[104, "module-moviepy.video.fx.MirrorX"]], "mirrory (class in moviepy.video.fx.mirrory)": [[105, "moviepy.video.fx.MirrorY.MirrorY"]], "apply() (moviepy.video.fx.mirrory.mirrory method)": [[105, "moviepy.video.fx.MirrorY.MirrorY.apply"]], "copy() (moviepy.video.fx.mirrory.mirrory method)": [[105, "moviepy.video.fx.MirrorY.MirrorY.copy"]], "moviepy.video.fx.mirrory": [[105, "module-moviepy.video.fx.MirrorY"]], "multiplycolor (class in moviepy.video.fx.multiplycolor)": [[106, "moviepy.video.fx.MultiplyColor.MultiplyColor"]], "apply() (moviepy.video.fx.multiplycolor.multiplycolor method)": [[106, "moviepy.video.fx.MultiplyColor.MultiplyColor.apply"]], "copy() (moviepy.video.fx.multiplycolor.multiplycolor method)": [[106, "moviepy.video.fx.MultiplyColor.MultiplyColor.copy"]], "moviepy.video.fx.multiplycolor": [[106, "module-moviepy.video.fx.MultiplyColor"]], "multiplyspeed (class in moviepy.video.fx.multiplyspeed)": [[107, "moviepy.video.fx.MultiplySpeed.MultiplySpeed"]], "apply() (moviepy.video.fx.multiplyspeed.multiplyspeed method)": [[107, "moviepy.video.fx.MultiplySpeed.MultiplySpeed.apply"]], "copy() (moviepy.video.fx.multiplyspeed.multiplyspeed method)": [[107, "moviepy.video.fx.MultiplySpeed.MultiplySpeed.copy"]], "moviepy.video.fx.multiplyspeed": [[107, "module-moviepy.video.fx.MultiplySpeed"]], "painting (class in moviepy.video.fx.painting)": [[108, 
"moviepy.video.fx.Painting.Painting"]], "apply() (moviepy.video.fx.painting.painting method)": [[108, "moviepy.video.fx.Painting.Painting.apply"]], "copy() (moviepy.video.fx.painting.painting method)": [[108, "moviepy.video.fx.Painting.Painting.copy"]], "moviepy.video.fx.painting": [[108, "module-moviepy.video.fx.Painting"]], "to_painting() (moviepy.video.fx.painting.painting method)": [[108, "moviepy.video.fx.Painting.Painting.to_painting"]], "resize (class in moviepy.video.fx.resize)": [[109, "moviepy.video.fx.Resize.Resize"]], "apply() (moviepy.video.fx.resize.resize method)": [[109, "moviepy.video.fx.Resize.Resize.apply"]], "copy() (moviepy.video.fx.resize.resize method)": [[109, "moviepy.video.fx.Resize.Resize.copy"]], "moviepy.video.fx.resize": [[109, "module-moviepy.video.fx.Resize"]], "resizer() (moviepy.video.fx.resize.resize method)": [[109, "moviepy.video.fx.Resize.Resize.resizer"]], "rotate (class in moviepy.video.fx.rotate)": [[110, "moviepy.video.fx.Rotate.Rotate"]], "apply() (moviepy.video.fx.rotate.rotate method)": [[110, "moviepy.video.fx.Rotate.Rotate.apply"]], "copy() (moviepy.video.fx.rotate.rotate method)": [[110, "moviepy.video.fx.Rotate.Rotate.copy"]], "moviepy.video.fx.rotate": [[110, "module-moviepy.video.fx.Rotate"]], "scroll (class in moviepy.video.fx.scroll)": [[111, "moviepy.video.fx.Scroll.Scroll"]], "apply() (moviepy.video.fx.scroll.scroll method)": [[111, "moviepy.video.fx.Scroll.Scroll.apply"]], "copy() (moviepy.video.fx.scroll.scroll method)": [[111, "moviepy.video.fx.Scroll.Scroll.copy"]], "moviepy.video.fx.scroll": [[111, "module-moviepy.video.fx.Scroll"]], "slidein (class in moviepy.video.fx.slidein)": [[112, "moviepy.video.fx.SlideIn.SlideIn"]], "apply() (moviepy.video.fx.slidein.slidein method)": [[112, "moviepy.video.fx.SlideIn.SlideIn.apply"]], "copy() (moviepy.video.fx.slidein.slidein method)": [[112, "moviepy.video.fx.SlideIn.SlideIn.copy"]], "moviepy.video.fx.slidein": [[112, "module-moviepy.video.fx.SlideIn"]], "slideout (class in moviepy.video.fx.slideout)": [[113, "moviepy.video.fx.SlideOut.SlideOut"]], "apply() (moviepy.video.fx.slideout.slideout method)": [[113, "moviepy.video.fx.SlideOut.SlideOut.apply"]], "copy() (moviepy.video.fx.slideout.slideout method)": [[113, "moviepy.video.fx.SlideOut.SlideOut.copy"]], "moviepy.video.fx.slideout": [[113, "module-moviepy.video.fx.SlideOut"]], "supersample (class in moviepy.video.fx.supersample)": [[114, "moviepy.video.fx.SuperSample.SuperSample"]], "apply() (moviepy.video.fx.supersample.supersample method)": [[114, "moviepy.video.fx.SuperSample.SuperSample.apply"]], "copy() (moviepy.video.fx.supersample.supersample method)": [[114, "moviepy.video.fx.SuperSample.SuperSample.copy"]], "moviepy.video.fx.supersample": [[114, "module-moviepy.video.fx.SuperSample"]], "timemirror (class in moviepy.video.fx.timemirror)": [[115, "moviepy.video.fx.TimeMirror.TimeMirror"]], "apply() (moviepy.video.fx.timemirror.timemirror method)": [[115, "moviepy.video.fx.TimeMirror.TimeMirror.apply"]], "copy() (moviepy.video.fx.timemirror.timemirror method)": [[115, "moviepy.video.fx.TimeMirror.TimeMirror.copy"]], "moviepy.video.fx.timemirror": [[115, "module-moviepy.video.fx.TimeMirror"]], "timesymmetrize (class in moviepy.video.fx.timesymmetrize)": [[116, "moviepy.video.fx.TimeSymmetrize.TimeSymmetrize"]], "apply() (moviepy.video.fx.timesymmetrize.timesymmetrize method)": [[116, "moviepy.video.fx.TimeSymmetrize.TimeSymmetrize.apply"]], "copy() (moviepy.video.fx.timesymmetrize.timesymmetrize method)": [[116, 
"moviepy.video.fx.TimeSymmetrize.TimeSymmetrize.copy"]], "moviepy.video.fx.timesymmetrize": [[116, "module-moviepy.video.fx.TimeSymmetrize"]], "moviepy.video.io": [[117, "module-moviepy.video.io"]], "moviepy.video.io.imagesequenceclip": [[118, "module-moviepy.video.io.ImageSequenceClip"]], "imagesequenceclip (class in moviepy.video.io.imagesequenceclip)": [[119, "moviepy.video.io.ImageSequenceClip.ImageSequenceClip"]], "moviepy.video.io.videofileclip": [[120, "module-moviepy.video.io.VideoFileClip"]], "videofileclip (class in moviepy.video.io.videofileclip)": [[121, "moviepy.video.io.VideoFileClip.VideoFileClip"]], "close() (moviepy.video.io.videofileclip.videofileclip method)": [[121, "moviepy.video.io.VideoFileClip.VideoFileClip.close"]], "filename (moviepy.video.io.videofileclip.videofileclip attribute)": [[121, "moviepy.video.io.VideoFileClip.VideoFileClip.filename"]], "fps (moviepy.video.io.videofileclip.videofileclip attribute)": [[121, "moviepy.video.io.VideoFileClip.VideoFileClip.fps"]], "moviepy.video.io.display_in_notebook": [[122, "module-moviepy.video.io.display_in_notebook"]], "html2() (in module moviepy.video.io.display_in_notebook)": [[123, "moviepy.video.io.display_in_notebook.HTML2"]], "display_in_notebook() (in module moviepy.video.io.display_in_notebook)": [[124, "moviepy.video.io.display_in_notebook.display_in_notebook"]], "html_embed() (in module moviepy.video.io.display_in_notebook)": [[125, "moviepy.video.io.display_in_notebook.html_embed"]], "moviepy.video.io.ffmpeg_reader": [[126, "module-moviepy.video.io.ffmpeg_reader"]], "ffmpeg_videoreader (class in moviepy.video.io.ffmpeg_reader)": [[127, "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader"]], "close() (moviepy.video.io.ffmpeg_reader.ffmpeg_videoreader method)": [[127, "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.close"]], "get_frame() (moviepy.video.io.ffmpeg_reader.ffmpeg_videoreader method)": [[127, "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.get_frame"]], "get_frame_number() (moviepy.video.io.ffmpeg_reader.ffmpeg_videoreader method)": [[127, "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.get_frame_number"]], "initialize() (moviepy.video.io.ffmpeg_reader.ffmpeg_videoreader method)": [[127, "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.initialize"]], "read_frame() (moviepy.video.io.ffmpeg_reader.ffmpeg_videoreader method)": [[127, "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.read_frame"]], "skip_frames() (moviepy.video.io.ffmpeg_reader.ffmpeg_videoreader method)": [[127, "moviepy.video.io.ffmpeg_reader.FFMPEG_VideoReader.skip_frames"]], "ffmpeginfosparser (class in moviepy.video.io.ffmpeg_reader)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser"]], "parse() (moviepy.video.io.ffmpeg_reader.ffmpeginfosparser method)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.parse"]], "parse_audio_stream_data() (moviepy.video.io.ffmpeg_reader.ffmpeginfosparser method)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.parse_audio_stream_data"]], "parse_data_by_stream_type() (moviepy.video.io.ffmpeg_reader.ffmpeginfosparser method)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.parse_data_by_stream_type"]], "parse_duration() (moviepy.video.io.ffmpeg_reader.ffmpeginfosparser method)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.parse_duration"]], "parse_fps() (moviepy.video.io.ffmpeg_reader.ffmpeginfosparser method)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.parse_fps"]], "parse_metadata_field_value() 
(moviepy.video.io.ffmpeg_reader.ffmpeginfosparser method)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.parse_metadata_field_value"]], "parse_tbr() (moviepy.video.io.ffmpeg_reader.ffmpeginfosparser method)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.parse_tbr"]], "parse_video_stream_data() (moviepy.video.io.ffmpeg_reader.ffmpeginfosparser method)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.parse_video_stream_data"]], "video_metadata_type_casting() (moviepy.video.io.ffmpeg_reader.ffmpeginfosparser method)": [[128, "moviepy.video.io.ffmpeg_reader.FFmpegInfosParser.video_metadata_type_casting"]], "ffmpeg_parse_infos() (in module moviepy.video.io.ffmpeg_reader)": [[129, "moviepy.video.io.ffmpeg_reader.ffmpeg_parse_infos"]], "ffmpeg_read_image() (in module moviepy.video.io.ffmpeg_reader)": [[130, "moviepy.video.io.ffmpeg_reader.ffmpeg_read_image"]], "moviepy.video.io.ffmpeg_tools": [[131, "module-moviepy.video.io.ffmpeg_tools"]], "ffmpeg_extract_audio() (in module moviepy.video.io.ffmpeg_tools)": [[132, "moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio"]], "ffmpeg_extract_subclip() (in module moviepy.video.io.ffmpeg_tools)": [[133, "moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip"]], "ffmpeg_merge_video_audio() (in module moviepy.video.io.ffmpeg_tools)": [[134, "moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio"]], "ffmpeg_resize() (in module moviepy.video.io.ffmpeg_tools)": [[135, "moviepy.video.io.ffmpeg_tools.ffmpeg_resize"]], "ffmpeg_stabilize_video() (in module moviepy.video.io.ffmpeg_tools)": [[136, "moviepy.video.io.ffmpeg_tools.ffmpeg_stabilize_video"]], "moviepy.video.io.ffmpeg_writer": [[137, "module-moviepy.video.io.ffmpeg_writer"]], "ffmpeg_videowriter (class in moviepy.video.io.ffmpeg_writer)": [[138, "moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter"]], "close() (moviepy.video.io.ffmpeg_writer.ffmpeg_videowriter method)": [[138, "moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter.close"]], "write_frame() (moviepy.video.io.ffmpeg_writer.ffmpeg_videowriter method)": [[138, "moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter.write_frame"]], "ffmpeg_write_image() (in module moviepy.video.io.ffmpeg_writer)": [[139, "moviepy.video.io.ffmpeg_writer.ffmpeg_write_image"]], "ffmpeg_write_video() (in module moviepy.video.io.ffmpeg_writer)": [[140, "moviepy.video.io.ffmpeg_writer.ffmpeg_write_video"]], "moviepy.video.io.ffplay_previewer": [[141, "module-moviepy.video.io.ffplay_previewer"]], "ffplay_videopreviewer (class in moviepy.video.io.ffplay_previewer)": [[142, "moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer"]], "close() (moviepy.video.io.ffplay_previewer.ffplay_videopreviewer method)": [[142, "moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer.close"]], "show_frame() (moviepy.video.io.ffplay_previewer.ffplay_videopreviewer method)": [[142, "moviepy.video.io.ffplay_previewer.FFPLAY_VideoPreviewer.show_frame"]], "ffplay_preview_video() (in module moviepy.video.io.ffplay_previewer)": [[143, "moviepy.video.io.ffplay_previewer.ffplay_preview_video"]], "moviepy.video.io.gif_writers": [[144, "module-moviepy.video.io.gif_writers"]], "write_gif_with_imageio() (in module moviepy.video.io.gif_writers)": [[145, "moviepy.video.io.gif_writers.write_gif_with_imageio"]], "moviepy.video.tools": [[146, "module-moviepy.video.tools"]], "moviepy.video.tools.credits": [[147, "module-moviepy.video.tools.credits"]], "creditsclip (class in moviepy.video.tools.credits)": [[148, "moviepy.video.tools.credits.CreditsClip"]], 
"moviepy.video.tools.cuts": [[149, "module-moviepy.video.tools.cuts"]], "framesmatch (class in moviepy.video.tools.cuts)": [[150, "moviepy.video.tools.cuts.FramesMatch"]], "framesmatches (class in moviepy.video.tools.cuts)": [[151, "moviepy.video.tools.cuts.FramesMatches"]], "best() (moviepy.video.tools.cuts.framesmatches method)": [[151, "moviepy.video.tools.cuts.FramesMatches.best"]], "filter() (moviepy.video.tools.cuts.framesmatches method)": [[151, "moviepy.video.tools.cuts.FramesMatches.filter"]], "from_clip() (moviepy.video.tools.cuts.framesmatches static method)": [[151, "moviepy.video.tools.cuts.FramesMatches.from_clip"]], "load() (moviepy.video.tools.cuts.framesmatches static method)": [[151, "moviepy.video.tools.cuts.FramesMatches.load"]], "save() (moviepy.video.tools.cuts.framesmatches method)": [[151, "moviepy.video.tools.cuts.FramesMatches.save"]], "select_scenes() (moviepy.video.tools.cuts.framesmatches method)": [[151, "moviepy.video.tools.cuts.FramesMatches.select_scenes"]], "write_gifs() (moviepy.video.tools.cuts.framesmatches method)": [[151, "moviepy.video.tools.cuts.FramesMatches.write_gifs"]], "detect_scenes() (in module moviepy.video.tools.cuts)": [[152, "moviepy.video.tools.cuts.detect_scenes"]], "find_video_period() (in module moviepy.video.tools.cuts)": [[153, "moviepy.video.tools.cuts.find_video_period"]], "moviepy.video.tools.drawing": [[154, "module-moviepy.video.tools.drawing"]], "blit() (in module moviepy.video.tools.drawing)": [[155, "moviepy.video.tools.drawing.blit"]], "circle() (in module moviepy.video.tools.drawing)": [[156, "moviepy.video.tools.drawing.circle"]], "color_gradient() (in module moviepy.video.tools.drawing)": [[157, "moviepy.video.tools.drawing.color_gradient"]], "color_split() (in module moviepy.video.tools.drawing)": [[158, "moviepy.video.tools.drawing.color_split"]], "moviepy.video.tools.interpolators": [[159, "module-moviepy.video.tools.interpolators"]], "interpolator (class in moviepy.video.tools.interpolators)": [[160, "moviepy.video.tools.interpolators.Interpolator"]], "trajectory (class in moviepy.video.tools.interpolators)": [[161, "moviepy.video.tools.interpolators.Trajectory"]], "addx() (moviepy.video.tools.interpolators.trajectory method)": [[161, "moviepy.video.tools.interpolators.Trajectory.addx"]], "addy() (moviepy.video.tools.interpolators.trajectory method)": [[161, "moviepy.video.tools.interpolators.Trajectory.addy"]], "from_file() (moviepy.video.tools.interpolators.trajectory static method)": [[161, "moviepy.video.tools.interpolators.Trajectory.from_file"]], "load_list() (moviepy.video.tools.interpolators.trajectory static method)": [[161, "moviepy.video.tools.interpolators.Trajectory.load_list"]], "save_list() (moviepy.video.tools.interpolators.trajectory static method)": [[161, "moviepy.video.tools.interpolators.Trajectory.save_list"]], "to_file() (moviepy.video.tools.interpolators.trajectory method)": [[161, "moviepy.video.tools.interpolators.Trajectory.to_file"]], "txy() (moviepy.video.tools.interpolators.trajectory method)": [[161, "moviepy.video.tools.interpolators.Trajectory.txy"]], "update_interpolators() (moviepy.video.tools.interpolators.trajectory method)": [[161, "moviepy.video.tools.interpolators.Trajectory.update_interpolators"]], "moviepy.video.tools.subtitles": [[162, "module-moviepy.video.tools.subtitles"]], "subtitlesclip (class in moviepy.video.tools.subtitles)": [[163, "moviepy.video.tools.subtitles.SubtitlesClip"]], "in_subclip() (moviepy.video.tools.subtitles.subtitlesclip method)": [[163, 
"moviepy.video.tools.subtitles.SubtitlesClip.in_subclip"]], "match_expr() (moviepy.video.tools.subtitles.subtitlesclip method)": [[163, "moviepy.video.tools.subtitles.SubtitlesClip.match_expr"]], "write_srt() (moviepy.video.tools.subtitles.subtitlesclip method)": [[163, "moviepy.video.tools.subtitles.SubtitlesClip.write_srt"]], "file_to_subtitles() (in module moviepy.video.tools.subtitles)": [[164, "moviepy.video.tools.subtitles.file_to_subtitles"]]}}) \ No newline at end of file diff --git a/user_guide/compositing.html b/user_guide/compositing.html deleted file mode 100644 index 51c2a7fe4..000000000 --- a/user_guide/compositing.html +++ /dev/null @@ -1,745 +0,0 @@ - - - - - - - - - - - - Compositing multiple clips — MoviePy documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Compositing multiple clips#

-

Video composition, also known as non-linear editing, is the process of mixing and playing several clips together in a new clip.

Note

-

Before starting, note that video clips generally carry an audio track and a mask, which are also clips. When you compose these clips together, the soundtrack and mask of the final clip are automatically generated by putting together the soundtracks and masks of the clips. So most of the time you don’t need to worry about mixing the audio and masks.

-
-
-

Juxtaposing and concatenating clips#

-

Two simple ways of putting clips together are to concatenate them (play them one after the other in a single long clip) or to juxtapose them (put them side by side in a single larger clip).

-
-

Concatenating multiple clips#

-

Concatenation can be done very easily with the function concatenate_videoclips().

-
from moviepy import VideoFileClip, concatenate_videoclips

# We load all the clips we want to concatenate
clip1 = VideoFileClip("example.mp4")
clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1)
clip3 = VideoFileClip("example3.mp4")

# We concatenate them and write the result
final_clip = concatenate_videoclips([clip1, clip2, clip3])
final_clip.write_videofile("final_clip.mp4")

The final_clip is a clip that plays the clips 1, 2, and 3 one after the other.

-
-

Note

-

The clips do not need to be the same size. If they aren't, they will all appear centered in a clip large enough to contain the biggest of them, optionally with a color of your choosing to fill the background.
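For clips of different sizes, here is a minimal sketch (it assumes the method and bg_color parameters of concatenate_videoclips, which this page does not show explicitly):

from moviepy import VideoFileClip, concatenate_videoclips

clip1 = VideoFileClip("example.mp4")   # e.g. the bigger clip
clip2 = VideoFileClip("example2.mp4")  # e.g. a smaller clip

# method="compose" centers clips of different sizes on a common canvas;
# bg_color fills the remaining background (here plain black)
final_clip = concatenate_videoclips([clip1, clip2], method="compose", bg_color=(0, 0, 0))
final_clip.write_videofile("final_clip.mp4")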

-
-

For more info, see concatenate_videoclips().

-
-
-

Juxtaposing multiple clips#

-

Putting multiple clips side by side is done with clips_array():

-
from moviepy import VideoFileClip, clips_array, vfx

# We will use the same clip and transform it in 3 ways
clip1 = VideoFileClip("example.mp4").with_effects([vfx.Margin(10)])  # add a 10px contour
clip2 = clip1.with_effects([vfx.MirrorX()])  # flip horizontally
clip3 = clip1.with_effects([vfx.MirrorY()])  # flip vertically
clip4 = clip1.resized(0.6)  # downsize to 60% of the original

# The shape of the final clip depends on the shape of the array:
# we want our 4 videos in a 2x2 grid, so we build a 2x2 array
final_clip = clips_array([[clip1, clip2], [clip3, clip4]])
final_clip = final_clip.resized(width=480)  # resize the result to the dimensions we want

final_clip.write_videofile("final_clip.mp4")

You obtain a clip which looks like this:

[Image: ../_images/stacked.jpeg — the four transformed clips in a 2x2 grid]

For more info, see clips_array().

-
-
-
-

More complex video compositing#

-

The CompositeVideoClip class is the base of all video compositing. For example, internally, both concatenate_videoclips() and clips_array() create a CompositeVideoClip.

-

It provides a very flexible way to compose clips, by playing multiple clips on top of each other in the order they were passed to CompositeVideoClip. Here's an example:

-
from moviepy import *

# We load all the clips we want to compose
clip1 = VideoFileClip("example.mp4")
clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1)
clip3 = VideoFileClip("example3.mp4")

# We composite them, stacked on top of each other: clip3 over clip2 over clip1
final_clip = CompositeVideoClip([clip1, clip2, clip3])
final_clip.write_videofile("final_clip.mp4")

Now final_clip plays all the clips at the same time, with clip3 over clip2 over clip1. This means that if all the clips have the same size, only clip3, which is on top, will be visible in the video… unless clip3 and/or clip2 have masks which hide parts of them.
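If instead you want the lower clips to show through, one option is opacity (with_opacity() is part of the VideoClip API); a minimal sketch:

# Making the top clip semi-transparent lets clip2 and clip1 show through
final_clip = CompositeVideoClip([clip1, clip2, clip3.with_opacity(0.5)])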

-
-

Note

-

Note that by default the composition has the size of its first clip (as it is generally a background). Sometimes, though, you will want to make your clips float in a bigger composition; to do so, pass the size of the final composition as the size parameter of CompositeVideoClip.
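A minimal sketch of that (the 1920x1080 canvas is an arbitrary example):

from moviepy import VideoFileClip, CompositeVideoClip

clip = VideoFileClip("example.mp4")
# The clip now floats in a canvas larger than itself
final_clip = CompositeVideoClip([clip], size=(1920, 1080))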

-
-

So far we have stacked multiple clips on top of each other, but this is obviously not enough for real video compositing. For that, we also need to control when each clip starts and stops playing, and to define the (x, y) position of those clips in the final video.

-

For more info, see CompositeVideoClip.

-
-

Changing starting and stopping times of clips#

-

In a CompositeVideoClip, each clip starts to play at the time specified by its clip.start attribute, and plays until clip.end.

-

So, supposing you want to play clip1 for the first second, start clip2 after 1.5 seconds, and play clip3 right at the end of clip2 for one second, you would do as follows:

-
from moviepy import *

# We load all the clips we want to compose
clip1 = VideoFileClip("example.mp4")
clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1)
clip3 = VideoFileClip("example3.mp4")

# We want to stop clip1 after 1s
clip1 = clip1.with_end(1)

# We want to start playing clip2 after 1.5s
clip2 = clip2.with_start(1.5)

# We want to play clip3 at the end of clip2, and for 1 second only
# (sometimes it is more practical to set the duration of a clip rather than its end)
clip3 = clip3.with_start(clip2.end).with_duration(1)

# We write the result
final_clip = CompositeVideoClip([clip1, clip2, clip3])
final_clip.write_videofile("final_clip.mp4")
-
-
-
-

Note

-

When working with the timing of your clips, you will frequently want to keep only parts of the original clip. To do so, take a look at with_subclip() and with_cutout().
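A quick sketch of the difference between the two methods named above:

from moviepy import VideoFileClip

clip = VideoFileClip("example.mp4")
extract = clip.with_subclip(1, 3)  # keep only the part between t=1s and t=3s
trimmed = clip.with_cutout(1, 3)   # remove t=1s..3s and splice the remainder together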

-
-
-
-

Positioning clips#

-

Frequently, you will want a smaller clip to appear on top of a larger one, and to decide where it appears in the composition by setting its position.

-

You can do so by using the with_position() method. The position is always defined from the top-left corner, but you can define it in many ways:

-
from moviepy import *

# We load the clips we want to compose
clip1 = VideoFileClip("example.mp4")
clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1)

# Absolute pixel coordinates, measured from the top-left corner
clip2 = clip2.with_position((45, 150))

# Keywords: horizontally and/or vertically centered
clip2 = clip2.with_position("center")
clip2 = clip2.with_position(("center", "top"))

# Coordinates as a fraction of the composition size (40% right, 70% down)
clip2 = clip2.with_position((0.4, 0.7), relative=True)

# The position can also be a function of time: t -> (x, y)
clip2 = clip2.with_position(lambda t: ("center", 50 + 10 * t))

# We write the result
final_clip = CompositeVideoClip([clip1, clip2])
final_clip.write_videofile("final_clip.mp4")
-
-
-

When indicating the position, keep in mind that the y coordinate has its zero at the top of the picture:

[Image: ../_images/videoWH.jpeg — width/height axes of a frame, with y = 0 at the top]
-
-

Adding transitions effects#

-

The last part of compositing is adding transition effects. For example, when a clip starts while another is still playing, it is often nicer to fade the new one in rather than have it appear abruptly.

-

To do so, we can use the transitions offered by MoviePy in transitions, like CrossFadeIn():

-
from moviepy import *

# We load the clips we want to compose
clip1 = VideoFileClip("example.mp4")
clip2 = VideoFileClip("example2.mp4").with_subclip(0, 1)

# clip2 will be on top of clip1 for 1s
clip1 = clip1.with_end(2)
clip2 = clip2.with_start(1)

# We add a 1s crossfadein on clip2
# Like other effects, transitions are applied with with_effects
clip2 = clip2.with_effects([vfx.CrossFadeIn(1)])

# We write the result
final_clip = CompositeVideoClip([clip1, clip2])
final_clip.write_videofile("final_clip.mp4")
-
-
-

MoviePy offers only a few transitions in transitions. But technically, transitions are mostly effects applied to the mask of a clip! That means you can actually use any of the existing effects as a transition by applying it to the mask of your clip.
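For instance, a sketch of that idea (it assumes that a FadeIn effect applied to a mask fades the clip's opacity, and that with_add_mask(), listed in the API, creates a mask when missing):

from moviepy import *

clip = VideoFileClip("example2.mp4").with_add_mask()  # make sure the clip has a mask
# Fading the mask in fades the clip's opacity: essentially a home-made cross-fade
clip = clip.with_mask(clip.mask.with_effects([vfx.FadeIn(1)]))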

-

For more info, see transitions and moviepy.video.fx.

-
-

Compositing audio clips#

-

When you mix video clips together, MoviePy will automatically compose their respective audio tracks to form the audio track of the final clip, so you don’t need to worry about compositing these tracks yourself.

-

If you want to make a custom audio track from several audio sources, audio clips can be mixed together like video clips, with CompositeAudioClip and concatenate_audioclips():

-
from moviepy import *

# We load all the clips we want to compose
aclip1 = AudioFileClip("example.wav")
aclip2 = AudioFileClip("example2.wav")
aclip3 = AudioFileClip("example3.wav")

# All the clips will play one after the other
concat = concatenate_audioclips([aclip1, aclip2, aclip3])

# We play aclip1, then aclip2 on top of it after 5s, and aclip3 on top of both after 9s
compo = CompositeAudioClip(
    [
        aclip1.with_multiply_volume(1.2),
        aclip2.with_start(5),  # start at t=5s
        aclip3.with_start(9),
    ]
)
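Neither result is rendered in the snippet above; assuming the usual AudioClip writer, you could save them like this:

concat.write_audiofile("concat.mp3")
compo.write_audiofile("compo.mp3", fps=44100)  # a composite may need an explicit fps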
diff --git a/user_guide/create_effects.html b/user_guide/create_effects.html
deleted file mode 100644
index e0709219f..000000000
--- a/user_guide/create_effects.html
+++ /dev/null
@@ -1,587 +0,0 @@
-Creating your own effects — MoviePy documentation

Creating your own effects#

-

In addition to the existing effects already offered by MoviePy, we can create our own effects to modify a clip however we want.

-
-

Why create your own effects?#

-

For simple enough tasks, we’ve seen that we can Modify a clip appearance and timing using filters. Though they might be enough for simple tasks, filters are somewhat limited:

-
  • They can only access the frame and/or timestamp
  • We cannot pass arguments to them
  • They are hard to maintain and re-use

To allow for more complex and reusable clip modifications, we can create our own custom effects, which we will later apply with with_effects().

-

For example, imagine we want to add a progress bar to a clip. To do so, we will need not only the time and image of the current frame, but also the total duration of the clip. We will also probably want to pass parameters defining the appearance of the progress bar, such as its color or height. This is a perfect task for an effect!

-
-
-

Creating an effect#

-

In MoviePy, effects are objects of type moviepy.Effect.Effect, the abstract base class for all effects (much as Clip is the base for VideoClip and AudioClip).

-

So, to create an effect, we need to inherit from the Effect class and do two things:

-
  • Create an __init__ method to receive the parameters of our effect.
  • Implement the inherited apply() method, which must take the clip we want to modify as an argument and return the modified version.
-

In the end, your effect will probably use time_transform(), image_transform(), or transform() to actually apply its modifications to the clip. The main difference is that, because your filter will be a method or an anonymous function inside your effect class, it will be able to access all the properties of your object!

-

So, let's see how we could create our progress bar effect:

-
from moviepy import VideoClip
from moviepy.decorators import requires_duration


# Here you see a decorator that verifies that our clip has a duration;
# MoviePy offers a few of those that come in handy when writing your own effects
@requires_duration
def progress_bar(clip: VideoClip, color: tuple, height: int = 10):
    """Add a progress bar at the bottom of our clip.

    Parameters
    ----------
    color: color of the bar as an RGB tuple
    height: height of the bar in pixels (default: 10)
    """

    # Because we defined the filter function inside the effect itself,
    # it has access to the enclosing scope and can use clip from inside filter
    def filter(get_frame, t):
        progression = t / clip.duration
        bar_width = int(progression * clip.w)

        # Showing a progress bar is just replacing the bottom pixels
        # on part of our frame
        frame = get_frame(t)
        frame[-height:, 0:bar_width] = color

        return frame

    return clip.transform(filter, apply_to="mask")
-
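The snippet above is written as a standalone filter function. Since the text describes class-based effects, here is a sketch of the same idea as an Effect subclass; the ProgressBar name and the dataclass layout are illustrative, not part of the library:

from dataclasses import dataclass

from moviepy import Clip
from moviepy.Effect import Effect


@dataclass
class ProgressBar(Effect):
    """Add a progress bar at the bottom of a clip."""

    color: tuple  # RGB color of the bar
    height: int = 10  # height of the bar in pixels

    def apply(self, clip: Clip) -> Clip:
        def filter(get_frame, t):
            progression = t / clip.duration
            bar_width = int(progression * clip.w)
            frame = get_frame(t)
            frame[-self.height:, 0:bar_width] = self.color
            return frame

        return clip.transform(filter)


# applied like any other effect:
# my_clip = my_clip.with_effects([ProgressBar(color=(255, 0, 0))])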
-
-
-

Note

-

When creating an effect, you frequently have to write boilerplate code to assign properties at object initialization; dataclasses are a nice way to limit that.

-
-

If you want to create your own effects, then in addition to this documentation we strongly encourage you to take a look at the existing ones (see moviepy.video.fx and moviepy.audio.fx) to see how they work and to take inspiration.

diff --git a/user_guide/index.html b/user_guide/index.html
deleted file mode 100644
index a9002274b..000000000
--- a/user_guide/index.html
+++ /dev/null
@@ -1,523 +0,0 @@
-The MoviePy User Guide — MoviePy documentation

The MoviePy User Guide#

-

The User Guide covers all of MoviePy's main concepts, grouped by task (loading, editing, composing, rendering), with a presentation of the different concepts and elements relevant to each task, as well as short code examples.

-

It is a good place for users wishing to understand one of these aspects more precisely and to discover the different MoviePy elements related to it.

-

For users wanting to have a quick overview of how to use MoviePy, a better place to start is the Getting started with MoviePy section, and more specifically the MoviePy in 10 Minutes: Creating a Trailer from “Big Buck Bunny” tutorial.

-

For a full overview of MoviePy, see the Api Reference.

diff --git a/user_guide/loading.html b/user_guide/loading.html
deleted file mode 100644
index a46172901..000000000
--- a/user_guide/loading.html
+++ /dev/null
@@ -1,1050 +0,0 @@
-Loading resources as clips — MoviePy documentation

Loading resources as clips#

-

The first step for making a video with MoviePy is to load the resources you wish to include in the final video.

-

In this section we present the different sorts of clips and how to load them. For information on modifying a clip, see Modifying clips and apply effects. For how to put clips together, see Compositing multiple clips. And for how to view or save them, see Previewing and saving video clips (we will usually save the clips at the end of each example, but won't explain it here).

-

There are a lot of different resources you can use with MoviePy, and you will load those different resources with different subtypes of Clip: more precisely, AudioClip for any audio element and VideoClip for any visual element.

-

The following code summarizes the base clips that you can create with moviepy:

-
from moviepy import *
import numpy as np

# Define some constants for later use
black = (0, 0, 0)  # RGB for black
# Random noise image of 200x100
make_frame = lambda t: np.random.randint(low=0, high=255, size=(100, 200, 3))
# A 440 Hz note, produced as a sine wave
make_frame_audio = lambda t: np.sin(440 * 2 * np.pi * t)

# Now let's see how to load different types of resources!

# VIDEO CLIPS
clip = VideoClip(
    make_frame, duration=5
)  # for custom animations, where make_frame is a function returning an image as a numpy array for a given time
clip = VideoFileClip("example.mp4")  # for videos
clip = ImageSequenceClip(
    "example_img_dir", fps=24
)  # for a list or directory of images to be used as a video sequence
clip = ImageClip("example.png")  # for a picture
clip = TextClip(
    font="./example.ttf", text="Hello!", font_size=70, color="black"
)  # to create the image of a text
clip = ColorClip(
    size=(460, 380), color=black
)  # a clip of a single uniform color, where color is an RGB tuple/array/list

# AUDIO CLIPS
clip = AudioFileClip(
    "example.wav"
)  # for audio files, but also videos where you only want to keep the audio track
clip = AudioClip(
    make_frame_audio, duration=3
)  # for custom audio, where make_frame is a function returning a float (or tuple for stereo) for a given time
-
-
-

The best way to understand all these clips more thoroughly is to read the full documentation for each one in the Api Reference.

-
-

Releasing resources by closing a clip#

-

When you create some types of clip instances - e.g. VideoFileClip or AudioFileClip - MoviePy creates a subprocess and locks the file. In order to release those resources when you are finished you should call the close() method.

-

This is more important for more complex applications and is particularly important when running on Windows. While Python’s garbage collector should eventually clean up the resources for you, closing them makes them available earlier.

-

However, if you close a clip too early, methods on the clip (and any clips derived from it) become unsafe.

-

So, the rules of thumb are:

-
-
  • Call close() on any clip that you construct, once you have finished using it and have also finished using any clip that was derived from it.
  • Even if you close a CompositeVideoClip instance, you still need to close the clips it was created from.
  • Otherwise, if you have a clip that was created by deriving it from another clip (e.g. by calling with_mask()), then generally you shouldn't close it; closing the original clip will also close the copy.
-

Clips act as context managers. This means you can use them with a with statement, and they will automatically be closed at the end of the block, even if there is an exception.

-
from moviepy import *

try:
    with AudioFileClip("example.wav") as clip:
        raise Exception("Let's simulate an exception")
except Exception as e:
    print("{}".format(e))
# clip.close() is implicitly called, so the lock on the example.wav file is immediately released
-
-
-
-
-

Categories of video clips#

-

Video clips are the building blocks of longer videos. Technically, they are clips with a clip.get_frame(t) method which outputs an HxWx3 numpy array representing the frame of the clip at time t.
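For instance, a small sketch of what get_frame() returns (using the same example.mp4 as elsewhere in this guide):

from moviepy import VideoFileClip

clip = VideoFileClip("example.mp4")
frame = clip.get_frame(1.5)  # the frame at t = 1.5 s
print(frame.shape)  # (height, width, 3) — an RGB numpy array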

-

There are two main types of video clips:

  • animated clips (made with VideoFileClip, VideoClip and ImageSequenceClip), which always have a duration.
  • unanimated clips (made with ImageClip, TextClip and ColorClip), which show the same picture for an a priori infinite duration.

There are also special video clips called masks, which belong to the categories above but output greyscale frames indicating which parts of another clip are visible or not.

-

A video clip can carry around an audio clip (AudioClip) in its audio attribute, which is its soundtrack, and a mask clip in its mask attribute.
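A short sketch of attaching both (example_mask.png is a hypothetical greyscale image; with_mask() and with_audio() are part of the VideoClip API):

from moviepy import *

clip = VideoFileClip("example.mp4")

# A mask is itself a clip, flagged with is_mask=True, whose frames are values in [0, 1]
mask = ImageClip("example_mask.png", is_mask=True, duration=clip.duration)
clip = clip.with_mask(mask)

# The soundtrack is an AudioClip stored in the audio attribute
clip = clip.with_audio(AudioFileClip("example.wav"))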

-
-

Animated clips#

-

These are clips whose image changes in time, and which have a duration and a number of frames per second.

-
-

VideoClip#

-

VideoClip is the base class for all the other video clips in MoviePy. If all you want is to edit video files, you will never need it. This class is practical when you want to make animations from frames that are generated by another library. All you need is to define a function make_frame(t) which returns an HxWx3 numpy array (of 8-bit integers) representing the frame at time t.

-

Here is an example where we create a pulsating red circle with the graphics library Pillow.

-
from PIL import Image, ImageDraw
import numpy as np
from moviepy import *
import math

WIDTH, HEIGHT = (128, 128)
RED = (255, 0, 0)


def make_frame(t):
    frequency = 1  # one pulse per second
    coef = 0.5 * (1 + math.sin(2 * math.pi * frequency * t))  # the radius varies over time
    radius = WIDTH * coef

    x1 = WIDTH / 2 - radius / 2
    y1 = HEIGHT / 2 - radius / 2
    x2 = WIDTH / 2 + radius / 2
    y2 = HEIGHT / 2 + radius / 2

    img = Image.new("RGB", (WIDTH, HEIGHT))
    draw = ImageDraw.Draw(img)
    draw.ellipse((x1, y1, x2, y2), fill=RED)

    return np.array(img)  # returns an 8-bit RGB array


clip = VideoClip(
    make_frame, duration=2
)  # we define a 2s duration for the clip to be able to render it later
clip.write_gif(
    "circle.gif", fps=15
)  # we must set a frame rate because VideoClip has no frame rate by default
-
-
-

Resulting in this:

[GIF: a pulsating red circle on a black background]
-

Note

-

Clips that are made with a make_frame function have neither an explicit frame rate nor a duration by default, so you must provide the duration at clip creation and a frame rate (fps, frames per second) for write_gif() and write_videofile(), and more generally for any method that requires iterating through the frames.

-
-

For more, see VideoClip.

-
-
-

VideoFileClip#

-

A VideoFileClip is a clip read from a video file (most formats are supported) or a GIF file. This is probably one of the most used objects! You load a video as follows:

-
from moviepy import *

myclip = VideoFileClip("example.mp4")

# video file clips already have fps and duration
print("Clip duration: {}".format(myclip.duration))
print("Clip fps: {}".format(myclip.fps))

myclip = myclip.with_subclip(0.5, 2)  # cut the clip between 0.5 and 2 secs
print("Clip duration: {}".format(myclip.duration))  # cutting updates the duration
print("Clip fps: {}".format(myclip.fps))  # and keeps the fps

myclip.write_videofile(
    "result.mp4"
)  # the output video will be 1.5 sec long and use the original fps
-
-
-
-

Note

-

These clips have fps (frames per second) and duration attributes, which are preserved when you make small modifications of the clip, and which are used by default by write_gif(), write_videofile(), etc.

-
-

For more, see VideoFileClip.

-
-
-

ImageSequenceClip#

-

An ImageSequenceClip is a clip made from a series of images:

-
from moviepy import *

# A clip with a list of images shown for 1 second each
myclip = ImageSequenceClip(
    [
        "example_img_dir/image_0001.jpg",
        "example_img_dir/image_0002.jpg",
        "example_img_dir/image_0003.jpg",
    ],
    durations=[1, 1, 1],
)
print(
    "Clip duration: {}".format(myclip.duration)
)  # 3 images, 1 second each, so duration = 3
print("Clip fps: {}".format(myclip.fps))  # 3 seconds, 3 images, so fps = 3/3 = 1

# This time we load all the images in the dir, and instead of showing them
# for X seconds each, we define an FPS
myclip2 = ImageSequenceClip("./example_img_dir", fps=30)
print(
    "Clip duration: {}".format(myclip2.duration)
)  # fps = 30, so duration = number of images in dir / 30
print("Clip fps: {}".format(myclip2.fps))  # fps = 30

myclip.write_gif("result.gif")  # the gif will be 3 sec long at 1 fps
myclip2.write_gif(
    "result2.gif"
)  # the gif will be 30 fps; its duration depends on the number of images in the dir
-
-
-

When creating an image sequence, sequence can be either a list of image names (that will be played in the provided order), a folder name (played in alphanumerical order), or a list of frames (Numpy arrays), obtained for instance from other clips.
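For instance, here is a minimal sketch (filenames hypothetical, and assuming example.mp4 is at least a few seconds long) building a sequence from raw frames grabbed out of another clip:

-from moviepy import *
-
-# Extract three frames (numpy arrays) from an existing clip
-source = VideoFileClip("example.mp4")
-frames = [source.get_frame(t) for t in (0, 1, 2)]
-
-# Build a new clip from those frames, shown one per second;
-# all frames come from the same source, so they share the same size
-frames_clip = ImageSequenceClip(frames, fps=1)
-frames_clip.write_videofile("from_frames.mp4")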

-
-

Warning

-

All the images in the list/folder/frames must be the same size, or an exception will be raised.

-
-

For more, see ImageSequenceClip.

-
-
-

DataVideoClip#

-

DataVideoClip is a video clip that takes a list of data entries and a callback function, and makes each frame by iterating over the data and invoking the callback function with the current entry as first argument.

-

You will probably never use this. But if you do, think of it like a VideoClip, where you make frames based not on time but on each entry of a data list.

-
from moviepy import *
-import numpy as np
-
-# Dataset will just be a list of colors as RGB
-dataset = [
-    (255, 0, 0),
-    (0, 255, 0),
-    (0, 0, 255),
-    (0, 255, 255),
-    (255, 0, 255),
-    (255, 255, 0),
-]
-
-
-# The make_frame function takes a data entry and creates a 200x100 px image filled with that color
-def make_frame(data):
-    frame = np.full((100, 200, 3), data, dtype=np.uint8)
-    return frame
-
-
-# We create the DataVideoClip and set FPS to 2, making a 3s clip (len(dataset) = 6, and 6/2 = 3)
-myclip = DataVideoClip(data=dataset, data_to_frame=make_frame, fps=2)
-
-# Modifying fps here will change video FPS, not clip FPS
-myclip.write_videofile("result.mp4", fps=30)
-
-
-

For more, see DataVideoClip.

-
-
-

UpdatedVideoClip#

-
-

Warning

-

This is really advanced usage; you will probably never need it. If you do, please go read the code.

-
-

UpdatedVideoClip is a video whose make_frame requires some objects to be updated before we can compute it.

-

This is particularly practical in science, where an algorithm may need to run several steps before a new frame can be generated, or when making a video from a live external context.

-

When you use this, you pass a world object to it. A world object is an object that respects these three rules:

-
    -
  1. It has a clip_t property, indicating the current world time.

  2. -
  3. It has an update() method that updates the world state and is responsible for increasing clip_t when a new frame can be drawn.

  4. -
  5. It has a to_frame() method that renders a frame based on the current world state.

  6. -
-

On each get_frame() call, your UpdatedVideoClip will try to update the world until world.clip_t is greater than or equal to the frame time, then it will call world.to_frame().

-
from moviepy import *
-import numpy as np
-import random
-
-
-# Imagine we want to make a video that becomes more and more red as the same coinflip face repeats in a row.
-# Because coinflips happen in real time, we need to wait until a winning streak ends to be able
-# to make the next frame.
-# This is a world simulating that. Sorry, it's hard to come up with examples...
-class CoinFlipWorld:
-    def __init__(self, fps):
-        """
-        FPS is useful because we must increment clip_t by 1/FPS for UpdatedVideoClip to run at a given FPS
-
-        """
-        self.clip_t = 0
-        self.win_strike = 0
-        self.reset = False
-        self.fps = fps
-
-    def update(self):
-        if self.reset:
-            self.win_strike = 0
-            self.reset = False
-
-        print("strike : {}, clip_t : {}".format(self.win_strike, self.clip_t))
-        print(self.win_strike)
-
-        # 0 tails, 1 heads, this is our simulation of coinflip
-        choice = random.randint(0, 1)
-        face = random.randint(0, 1)
-
-        # Same face: we increment our streak and retry
-        if choice == face:
-            self.win_strike += 1
-            return
-
-        # Different face: we increment clip_t and set reset so we will reset on the next update.
-        # We don't reset immediately because we still need the current state to make the frame
-        self.reset = True
-        self.clip_t += 1 / self.fps
-
-    def to_frame(self):
-        red_intensity = 255 * (
-            self.win_strike / 10
-        )  # 100% red for 10 victories and more
-        red_intensity = min(red_intensity, 255)
-
-        # A 200x100 image with red more or less intense based on number of victories in a row
-        return np.full((100, 200, 3), (red_intensity, 0, 0), dtype=np.uint8)
-
-
-world = CoinFlipWorld(fps=5)
-
-myclip = UpdatedVideoClip(world=world, duration=10)
-# We set FPS to the same value as the world's; if we used a different FPS, the lower of world.fps
-# and our write_videofile fps param would be the real visible fps
-myclip.write_videofile("result.mp4", fps=5)
-
-
-
-
-
-

Unanimated clips#

-

These are clips whose image will, at least before modifications, stay the same. By default they have neither duration nor FPS, meaning you will need to define those before doing any operation that needs such information (for example rendering).

-
-

ImageClip#

-

ImageClip is the base class for all unanimated clips; it's a video clip that always displays the same image. Along with VideoFileClip it's one of the most used kinds of clips. You can create one as follows:

-
from moviepy import *
-import numpy as np
-
-# Random RGB noise image of 200x100 (8-bit integers)
-noise_image = np.random.randint(low=0, high=255, size=(100, 200, 3), dtype=np.uint8)
-
-myclip1 = ImageClip("example.png")  # You can create it from a path
-myclip2 = ImageClip(noise_image)  # from a (height x width x 3) RGB numpy array
-myclip3 = VideoFileClip("./example.mp4").to_ImageClip(
-    t="00:00:01"
-)  # Or load videoclip and extract frame at a given time
-
-
-

For more, see ImageClip.

-
-
-

TextClip#

-

A TextClip is a clip that will turn a text string into an image clip.

-

TextClip accepts many parameters, letting you configure the appearance of the text, such as font and font size, color, line spacing, text alignment, etc.

-

The font you want to use must be an OpenType font, and you set it by passing the path to the font file.

-

Here are a few examples of using TextClip:

-
from moviepy import *
-
-font = "./example.ttf"
-
-# First we use a string and let the system compute the clip dimensions to fit the text
-# we set the clip duration to 2 secs; without it, the clip would have an infinite duration
-txt_clip1 = TextClip(
-    font=font,
-    text="Hello World !",
-    font_size=30,
-    color="#FF0000",
-    bg_color="#FFFFFF",
-    duration=2,
-)  # Red
-
-# This time we load the text from a file, set a fixed clip size, and let the system find the best font size,
-# allowing for line breaking
-txt_clip2 = TextClip(
-    font=font,
-    filename="./example.txt",
-    size=(500, 200),
-    bg_color="#FFFFFF",
-    method="caption",
-    color=(0, 0, 255, 127),
-)  # Blue with 50% transparency
-
-# we set a duration, because by default image clips are infinite, and we cannot render something infinite
-txt_clip2 = txt_clip2.with_duration(2)
-
-txt_clip1.write_videofile(
-    "result1.mp4", fps=24
-)  # ImageClip has no FPS either, so we must define it
-txt_clip2.write_videofile("result2.mp4", fps=24)
-
-
-
-

Note

-

The parameter method lets you define whether the text should be written on a single line and overflow if too long (label) or be automatically broken into lines (caption).

-
-

For a more detailed explanation of all the parameters, see TextClip.

-
-
-

ColorClip#

-

A ColorClip is a clip that returns an image of a single color. It is sometimes useful when doing compositing (see Compositing multiple clips).

-
from moviepy import *
-
-myclip = ColorClip(
-    size=(200, 100), color=(255, 0, 0), duration=1
-)  # Color is passed as an RGB tuple
-myclip.write_videofile(
-    "result.mp4", fps=1
-)  # We really don't need more than 1 fps, do we?
-
-
-

For more, see ColorClip.

-
-
-
-

Mask clips#

-

Masks are a special kind of VideoClip with the property is_mask set to True. They can be attached to any other kind of VideoClip through method with_mask().

-

When a clip has a mask attached to it, the mask indicates which pixels will be visible when the clip is composited with other clips (see Compositing multiple clips). Masks are also used to define transparency when you export the clip as a GIF or a PNG file.

-

The fundamental difference between masks and standard clips is that standard clips output frames with 3 components (R-G-B) per pixel, ranging from 0 to 255, while a mask has just one component per pixel, between 0 and 1 (1 indicating a fully visible pixel and 0 a transparent pixel). Put differently, a mask is always in greyscale.

-

When you create or load a clip that you will use as a mask, you need to declare it as such. You can then attach it to any clip with the same dimensions:

-
from moviepy import *
-import numpy as np
-
-# A random greyscale noise mask of 200x100 (one value per pixel, between 0 and 1)
-makeframe = lambda t: np.random.rand(100, 200)
-
-# To define the VideoClip as a mask, just pass parameter is_mask as True
-maskclip1 = VideoClip(makeframe, duration=4, is_mask=True)  # A random noise mask
-maskclip2 = ImageClip("example_mask.jpg", is_mask=True)  # A fixed mask as jpeg
-maskclip3 = VideoFileClip("example_mask.mp4", is_mask=True)  # A video as a mask
-
-# Load our basic clip, resize to 200x100 and apply each mask
-clip = VideoFileClip("example.mp4")
-clip_masked1 = clip.with_mask(maskclip1)
-clip_masked2 = clip.with_mask(maskclip2)
-clip_masked3 = clip.with_mask(maskclip3)
-
-
-
-

Note

-

In the case of video and image files, if these are not already black and white they will be converted automatically.

-

Also, when you load an image with an alpha layer, like a PNG, MoviePy will use this layer as a mask, unless you pass transparent=False.

-
-

Any video clip can be turned into a mask with to_mask(), and a mask can be turned into a standard RGB video clip with to_RGB().
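A minimal sketch of both conversions (assuming example.mp4 exists):

-from moviepy import *
-
-clip = VideoFileClip("example.mp4")
-
-# Standard clip -> mask (frames become a single channel in [0, 1])
-mask = clip.to_mask()
-
-# Mask -> standard greyscale RGB clip
-rgb_again = mask.to_RGB()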

-

Masks are treated differently by many methods (because their frames are different), but at their core they are VideoClips, so you can do with them everything you can do with a video clip: modify, cut, apply effects, save, etc.

-
-
-
-

Using audio elements with audio clips#

-

In addition to VideoClip for visuals, you can use audio elements, like an audio file, through the AudioClip class.

-

Both are quite similar, except that the AudioClip method get_frame() returns a numpy array of size Nx1 for mono and Nx2 for stereo.

-
-

AudioClip#

-

AudioClip is the base class for all audio clips. If all you want is to edit audio files, you will never need it.

-

All you need is to define a function make_frame(t) which returns a Nx1 or Nx2 numpy array representing the sound at time t.

-
from moviepy import *
-import numpy as np
-
-# Producing a sinewave of 440 Hz -> note A
-make_frame_audio = lambda t: np.sin(440 * 2 * np.pi * t)
-
-# AUDIO CLIPS
-clip = AudioClip(make_frame_audio, duration=3)
-
-
-
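For stereo, the function must return two values per timepoint. A minimal sketch (note that MoviePy may call make_frame with an array of timepoints, which np.column_stack handles):

-import numpy as np
-from moviepy import *
-
-# Left channel at 440 Hz (note A), right channel at 880 Hz (A one octave up)
-make_frame_stereo = lambda t: np.column_stack(
-    [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)]
-)
-
-stereo_clip = AudioClip(make_frame_stereo, duration=3)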

For more, see AudioClip.

-
-
-

AudioFileClip#

-

AudioFileClip is used to load an audio file; this is probably the only kind of audio clip you will use.

-

You simply pass it the file you want to load:

-
from moviepy import *
-import numpy as np
-
-# Works for audio files, but also for video files where you only want to keep the audio track
-clip = AudioFileClip("example.wav")
-clip.write_audiofile("./result.wav")
-
-
-

For more, see AudioFileClip.

-
-
-

AudioArrayClip#

-

AudioArrayClip is used to turn an array representing a sound into an audio clip. You will probably never use it, unless you need to use the result of some third-party library without going through a temporary file.

-

You need to provide a numpy array representing the sound (of size Nx1 for mono, Nx2 for stereo), and the number of fps, indicating the speed at which the sound is supposed to be played.

-
import numpy as np
-from moviepy import *
-
-# We want to play those notes
-notes = {"A": 440, "B": 494, "C": 523, "D": 587, "E": 659, "F": 698}
-
-note_duration = 0.5
-total_duration = len(notes) * note_duration
-sample_rate = 44100  # Number of samples per second
-
-note_size = int(note_duration * sample_rate)
-total_size = note_size * len(notes)
-
-
-def make_frame(t, note_frequency):
-    return np.sin(note_frequency * 2 * np.pi * t)
-
-
-# We generate all frames timepoints
-times = np.linspace(0, total_duration, total_size)
-
-# We make a stereo array of shape (total_size, 2), where total_size is the total number of samples
-audio_array = np.zeros((total_size, 2))
-i = 0
-for note, frequency in notes.items():
-    for _ in range(note_size):
-        # Write the same sample on both channels, otherwise the right channel stays silent
-        audio_array[i][0] = audio_array[i][1] = make_frame(times[i], frequency)
-        i += 1
-
-# Create an AudioArrayClip from the audio samples
-audio_clip = AudioArrayClip(audio_array, fps=sample_rate)
-
-# Write the audio clip to a WAV file
-audio_clip.write_audiofile("result.wav", fps=44100)
-
-
-

For more, see AudioArrayClip.

-
-
-
\ No newline at end of file diff --git a/user_guide/modifying.html b/user_guide/modifying.html deleted file mode 100644 index 156c4af99..000000000 --- a/user_guide/modifying.html +++ /dev/null @@ -1,741 +0,0 @@

Modifying clips and applying effects#

-

Of course, once you have loaded a Clip, the next step is to modify it so you can integrate it into your final video.

-
-
To modify a clip, there are three main courses of action:
    -
  • The built-in methods of VideoClip or AudioClip modifying the properties of the object.

  • -
  • The already-implemented effects of MoviePy you can apply on clips, usually affecting the clip by applying filters on each frame of the clip at rendering time.

  • -
  • The transformation filters that you can apply using transform() and time_transform().

  • -
-
-
-
-

How are modifications applied to a clip?#

-
-

Clip copy during modification#

-

The first thing you must know is that when modifying a clip, MoviePy will never modify that clip directly. Instead it will return a modified copy of the original and leave the original untouched. This is known as out-of-place (as opposed to in-place) behavior.

-

To illustrate:

-
# Import everything needed to edit video clips
-from moviepy import *
-
-# Load example.mp4
-clip = VideoFileClip("example.mp4")
-
-# This does nothing: with_multiply_volume returns a copy of clip, which is lost immediately since we don't store it
-# If you were to render clip now, the audio would still be at full volume
-clip.with_multiply_volume(0.1)
-
-# This creates a copy of clip in clip_whisper with a volume of only 10% of the original, but does not modify the original clip
-# If you were to render clip right now, the audio would still be at full volume
-# If you were to render clip_whisper, the audio would be at 10% of the original volume
-clip_whisper = clip.with_multiply_volume(0.1)
-
-# This replaces the original clip with a copy of it where the volume is only 10% of the original
-# If you were to render clip now, the audio would be at 10%
-# The original clip is now lost
-clip = clip.with_multiply_volume(0.1)
-
-
-

This is an important point to understand, because it is one of the most recurrent sources of bugs for newcomers.

-
-
-

Memory consumption of effect and modifications#

-

When you apply an effect or modification, it is not immediately applied to all the frames of the clip, but only to the first frame: all the other frames are modified only when required (that is, when you write the whole clip to a file or preview it).

-

This means that creating a new clip is neither time- nor memory-hungry; all the computation happens during the final rendering.

-
-
-

Time representations in MoviePy#

-

Many methods that we will see accept durations or timepoints as arguments, for instance clip.with_subclip(t_start, t_end), which cuts the clip between two timepoints.

-

MoviePy usually accepts durations and timepoints as either:

-
    -
  • a number of seconds as a float.

  • -
  • a tuple with (minutes, seconds) or (hours, minutes, seconds).

  • -
  • a string such as '00:03:50.54'.

  • -
-

Also, you can usually provide negative times, indicating a time counted from the end of the clip. For example, clip.with_subclip(-20, -10) cuts the clip between 20s before the end and 10s before the end.
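As a sketch (assuming example.mp4 is at least a few minutes long), the following cuts are all valid, and the first three are equivalent:

-from moviepy import *
-
-clip = VideoFileClip("example.mp4")
-
-cut1 = clip.with_subclip(230.54, 260)                 # seconds as floats
-cut2 = clip.with_subclip((3, 50.54), (4, 20))         # (minutes, seconds) tuples
-cut3 = clip.with_subclip("00:03:50.54", "00:04:20")   # strings
-
-# Negative values count from the end of the clip
-cut4 = clip.with_subclip(-20, -10)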

-
-
-
-

Modify a clip using the with_* methods#

-

The first way to modify a clip is by modifying the internal properties of your object, thus changing its behavior.

-

These methods usually start with the prefix with_ or without_, indicating that they return a copy of the clip with the properties modified.

-

So, you may write something like:

-
from moviepy import VideoFileClip
-from moviepy import vfx, afx
-
-myclip = VideoFileClip("example.mp4")
-myclip = myclip.with_end(5)  # stop the clip after 5 sec
-myclip = myclip.without_audio()  # remove the audio of the clip
-
-
-

In addition to the with_* methods, a handful of very common methods are also accessible under shorter names; these are:

- -

For a list of all those methods, see Clip and VideoClip.

-
-
-

Modify a clip using effects#

-

The second way to modify a clip is by using effects that will modify the frames of the clip (which internally are no more than numpy arrays) by applying some sort of functions on them.

-

MoviePy comes with many effects implemented in moviepy.video.fx for visual effects and moviepy.audio.fx for audio effects. For practicality, these two modules are exposed in MoviePy as vfx and afx, letting you import them as from moviepy import vfx, afx.

-

To use these effects, you simply need to instantiate them as objects and apply them to your Clip using the method with_effects(), passing the list of Effect objects you want to apply.

-

For convenience, the effects are also dynamically added as methods of the VideoClip and AudioClip classes at runtime, letting you call them as simple methods of your clip.

-

So, you may write something like:

-
from moviepy import VideoFileClip
-from moviepy import vfx, afx
-
-myclip = VideoFileClip("example.mp4")
-myclip = myclip.with_effects(
-    [vfx.Resize(width=460)]
-)  # resize clip to be 460px in width, keeping aspect ratio
-
-# effect methods return a copy of the clip, so we can easily chain them
-myclip = myclip.with_effects(
-    [vfx.MultiplySpeed(2), afx.MultiplyVolume(0.5)]
-)  # double the speed and half the audio volume
-
-# effects are also attached to Clip at runtime as methods; here we keep the with_effects form
-myclip = myclip.with_effects([vfx.MultiplyColor(0.5)])  # darken the clip
-
-
-
-

Note

-

MoviePy effects are automatically applied to both the sound and the mask of the clip if it is relevant, so that you don’t have to worry about modifying these.

-
-

For a list of those effects, see moviepy.video.fx and moviepy.audio.fx.

-

In addition to the effects already provided by MoviePy, you can of course create your own effects (see Creating your own effects) and use them the same way.

-
-
-

Modify a clip appearance and timing using filters#

-

In addition to modifying a clip's properties and using effects, you can also modify the appearance or timing of a clip by using your own custom filters with time_transform(), image_transform(), and more generally with transform().

-

All those methods work by taking as first parameter a callback function that receives either a clip frame, a timepoint, or both, and returns a modified version of it.

-
-

Modify only the timing of a Clip#

-

You can change the timeline of the clip with time_transform(your_filter), where your_filter is a callback function taking the clip time as a parameter and returning a new time:

-
from moviepy import VideoFileClip
-import math
-
-my_clip = VideoFileClip("example.mp4")
-
-
-# You can define a function the classical way
-def accel_x3(time: float) -> float:
-    return time * 3
-
-
-modified_clip1 = my_clip.time_transform(accel_x3)
-
-# Or you can also use a lambda function
-modified_clip2 = my_clip.time_transform(lambda t: 1 + math.sin(t))
-
-
-

Now the clip modified_clip1 plays three times faster than my_clip, while modified_clip2 oscillates between 00:00:00 and 00:00:02 of my_clip. Note that in the latter case you have created a clip of infinite duration (which is not a problem for the moment).

-
-

Note

-

By default time_transform() will only modify the clip's main frames, without modifying the clip's audio or mask for a VideoClip.

-

If you wish to also modify audio and/or mask you can provide the parameter apply_to with either 'audio', 'mask', or ['audio', 'mask'].
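For instance, a small sketch that plays the clip twice as fast, warping the audio and mask along with it:

-from moviepy import VideoFileClip
-
-my_clip = VideoFileClip("example.mp4")
-
-# Apply the same time warp to the audio and the mask as well
-fast_clip = my_clip.time_transform(lambda t: 2 * t, apply_to=["audio", "mask"])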

-
-
-
-

Modifying only the appearance of a Clip#

-

For VideoClip, you can change the appearance of the clip with image_transform(your_filter), where your_filter is a callback function taking a clip frame (a numpy array) as a parameter and returning the transformed frame:

-
from moviepy import VideoFileClip
-import numpy
-
-my_clip = VideoFileClip("example.mp4")
-
-
-def invert_green_blue(image: numpy.ndarray) -> numpy.ndarray:
-    return image[:, :, [0, 2, 1]]
-
-
-modified_clip1 = my_clip.image_transform(invert_green_blue)
-
-
-

Now the clip modified_clip1 will have its green and blue channels inverted.

-
-

Note

-

You can define whether the transformation should be applied to the audio and mask, just as for time_transform().

-
-
-

Note

-

Sometimes you need to treat clip frames and mask frames differently. To distinguish between the two, you can always look at their shape: clip frames are H*W*3, and mask frames are H*W.

-
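For instance, a sketch of a brightness filter that works for both kinds of frames by checking the number of dimensions:

-import numpy as np
-
-
-def halve_values(frame: np.ndarray) -> np.ndarray:
-    """Halve pixel values, whether frame is an RGB image (H*W*3) or a mask (H*W)."""
-    if frame.ndim == 3:
-        # Standard clip frame: 8-bit integers
-        return (frame // 2).astype("uint8")
-    # Mask frame: floats between 0 and 1
-    return frame / 2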
-
-
-

Modifying both the appearance and the timing of a Clip#

-

Finally, you may want to process the clip by taking into account both the time and the frame picture, for example to apply visual effects that vary with time. This is possible with the method transform(your_filter), where your_filter is a callback function taking two parameters and returning a new frame picture. The first argument is a get_frame method (i.e. a function get_frame(time) which, given a time, returns the clip's frame at that time), and the second argument is the time.

-
from moviepy import VideoFileClip
-import math
-
-my_clip = VideoFileClip("example.mp4")
-
-
-def scroll(get_frame, t):
-    """
-    This function returns a 'region' of the current frame.
-    The position of this region depends on the time.
-    """
-    frame = get_frame(t)
-    frame_region = frame[int(t) : int(t) + 360, :]
-    return frame_region
-
-
-modified_clip1 = my_clip.transform(scroll)
-
-
-

This will scroll down the clip, with a constant height of 360 pixels.

-
-

Note

-

You can define whether the transformation should be applied to the audio and mask, just as for time_transform().

-
-
-

Note

-

When programming a new effect, whenever possible prefer using time_transform and image_transform over transform. The reason is that, although they all internally rely on transform, when these effects are applied to ImageClip objects MoviePy will recognize that they only need to be applied once instead of on each frame, resulting in faster renderings.

-
-

To keep things simple, we have only addressed the case of VideoClip, but the same principle applies to AudioClip, except that instead of a picture frame you will have an audio frame, which is also a numpy array.

-
-
-
\ No newline at end of file diff --git a/user_guide/rendering.html b/user_guide/rendering.html deleted file mode 100644 index b7a410743..000000000 --- a/user_guide/rendering.html +++ /dev/null @@ -1,754 +0,0 @@

Previewing and saving video clips#

-

Once you are done working with your clips, the last step is to export the result into a video or image file, or sometimes simply to preview it in order to verify that everything works as expected.

-
-

Previewing a clip#

-

When you are working with a clip, you will frequently need to take a peek at what it looks like, either to verify that everything is working as intended, or to check how things look.

-

To do so you could render your entire clip into a file, but that's a pretty long task when you only need a quick look, so a better solution exists: previewing.

-
-

Preview a clip as a video#

-
-

Warning

-

You must have FFPLAY installed and accessible to MoviePy to be able to use preview(). If you're not sure, take a look at Installation of additional binaries.

-
-

The first thing you can do is preview your clip as a video, by calling the method preview() on it:

-
from moviepy import *
-
-myclip = VideoFileClip("./example.mp4").with_subclip(0, 1)  # Keep only 0 to 1 sec
-
-# We preview our clip as a video, inheriting FPS and audio of the original clip
-myclip.preview()
-
-# We preview our clip as video, but with a custom FPS for video and audio
-# making it less demanding for our computer
-myclip.preview(fps=5, audio_fps=11000)
-
-# Now we preview without audio
-myclip.preview(audio=False)
-
-
-

You will probably frequently want to preview only a small portion of your clip. Though preview does not offer such a capability, you can easily emulate it using with_subclip():
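For instance:

-from moviepy import *
-
-myclip = VideoFileClip("./example.mp4")
-
-# Preview only the segment between 5s and 10s
-myclip.with_subclip(5, 10).preview(fps=10)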

-
-

Note

-

It is quite frequent for a clip preview to be out of sync, or to play slower than it should. This means that your computer is not powerful enough to render the clip in real time.

-

Don’t hesitate to play with the options of preview: for instance, lower the fps of the sound (11000 Hz is still fine) and the video. Also, downsizing your video with resize can help.

-
-

For more info, see preview().

-
-

Note

-

A quite similar function is also available for AudioClip(), see ffplay_audiopreview().

-
-
-
-

Preview just one frame of a clip#

-

In a lot of situations you don't really need to preview your whole clip; seeing a single frame is enough to check how it looks and to make sure everything goes as expected.

-

To do so, you can use the method show() on your clip, passing the frame time as an argument:

-
from moviepy import *
-
-myclip = VideoFileClip("./example.mp4")
-
-# We show the first frame of our clip
-myclip.show()
-
-# We show the frame at point 00:00:01.5 of our clip
-myclip.show(1.5)
-
-# We want to see our clip without applying its mask
-myclip.show(1.5, with_mask=False)
-
-
-

Contrary to video previewing, show does not require ffplay; it uses Pillow's Image.show function.

-

For more info, see show().

-
-
-

Showing a clip in Jupyter Notebook#

-

If you work with a Jupyter Notebook, it can be very practical to display your clip in the notebook. To do so, you can use the method display_in_notebook() on your clip.

-[Image: ../_images/demo_preview.jpeg, a clip displayed inline in a notebook] -

With display_in_notebook() you can embed videos, images and sounds, either from a file or directly from a clip:

-
from moviepy import *
-
-# ...
-# ... some Jupyter-specific stuff
-# ...
-
-my_video_clip = VideoFileClip("./example.mp4")
-my_image_clip = ImageClip("./example.png")
-my_audio_clip = AudioFileClip("./example.wav")
-
-# We can show any type of clip
-my_video_clip.display_in_notebook()  # embeds a video
-my_image_clip.display_in_notebook()  # embeds an image
-my_audio_clip.display_in_notebook()  # embeds a sound
-
-# We can display only a snapshot of a video
-my_video_clip.display_in_notebook(t=1)
-
-# We can provide any valid HTML5 option as keyword argument
-# For instance, if the clip is too big, we can set width
-my_video_clip.display_in_notebook(width=400)
-
-# We can also make it loop, for example to check if a GIF is
-# looping as expected
-my_video_clip.display_in_notebook(autoplay=1, loop=1)
-
-
-
-

Warning

-

Know that display_in_notebook() will only work if it is on the last line of the notebook cell.

-

Also, note that display_in_notebook() actually embeds the clips physically in your notebook. The advantage is that you can move the notebook or put it online and the videos will still work. The drawback is that the notebook's file size can become very large. Depending on your browser, re-computing and displaying a video many times can take up space in the cache and the RAM (this will only be a problem for intensive use). Restarting your browser solves the problem.

-
-

For more info, see display_in_notebook().

-
-
-
-

Save your clip into a file#

-

Once you are satisfied with how your clip looks, you can save it into a file, a step known in video editing as rendering. MoviePy offers various ways to save your clip.

-
-

Video files (.mp4, .webm, .ogv…)#

-

The obvious first choice is to write your clip to a video file, which you can do with write_videofile():

-
from moviepy import *
-
-# We load all the clips we want to compose
-background = VideoFileClip("long_examples/example2.mp4").with_subclip(0, 10)
-title = TextClip(
-    "./example.ttf",
-    text="Big Buck Bunny",
-    font_size=80,
-    color="#fff",
-    text_align="center",
-    duration=3,
-).with_position(("center", "center"))
-
-# We make our final clip through composition
-final_clip = CompositeVideoClip([background, title])
-
-# And finally we can write the result into a file
-
-# Here we just save as MP4, inheriting FPS, etc. from final_clip
-final_clip.write_videofile("result.mp4")
-
-# Here we save as MP4, but we set the FPS of the clip to our own, here 24 fps, like cinema
-final_clip.write_videofile("result24fps.mp4", fps=24)
-
-# Now we save as WEBM instead, and we want to use the codec libvpx-vp9 (useful when you need transparency, which mp4 does not support).
-# We also want ffmpeg's compression optimisation to be as minimal as possible. This will not change
-# the video quality and it will decrease the encoding time, but it will increase the final file size a lot.
-# Finally, we want ffmpeg to use 4 threads for video encoding. You should probably leave that
-# at the default, as ffmpeg is already quite good at picking the best settings on its own.
-final_clip.write_videofile(
-    "result.webm", codec="libvpx-vp9", fps=24, preset="ultrafast", threads=4
-)
-
-
-

MoviePy can find a default codec name for the most common file extensions. If you want to use exotic formats, or if you are not happy with the defaults, you can provide the codec yourself, for instance codec='mpeg4'.
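A small sketch (the output filename is arbitrary):

-from moviepy import *
-
-myclip = VideoFileClip("example.mp4")
-
-# Explicitly pick the codec instead of relying on the extension default
-myclip.write_videofile("result.avi", codec="mpeg4", fps=24)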

-

There are many, many options when writing a video (bitrate, audio-writing parameters, file size optimization, number of processors to use, etc.), and we will not go into detail on each. For more info, see write_videofile().

-
-

Note

-

Though you are encouraged to play with the settings of write_videofile, know that lowering the optimization preset or increasing the number of threads will not necessarily improve the rendering time, as the bottleneck may be MoviePy's computation of each frame rather than ffmpeg's encoding.

-

Also, know that it is possible to pass additional parameters to the ffmpeg command line invoked by MoviePy by using the ffmpeg_params argument.
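For instance, a sketch forwarding a constant-rate-factor option (a standard libx264 flag, not a MoviePy one) straight to ffmpeg:

-from moviepy import *
-
-myclip = VideoFileClip("example.mp4")
-
-# "-crf 18" is passed verbatim to the ffmpeg command line
-myclip.write_videofile("result.mp4", ffmpeg_params=["-crf", "18"])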

-
-

Sometimes it is impossible for MoviePy to guess the duration attribute of the clip (keep in mind that some clips, like ImageClips displaying a picture, have a priori an infinite duration). In that case, the duration must be set manually with with_duration():

-
from moviepy import *
-
-# By default an ImageClip has no duration
-my_clip = ImageClip("example.png")
-
-try:
-    # This will fail! We cannot write a clip with no duration!
-    my_clip.write_videofile("result.mp4")
-except:
-    print("Cannot write a video without duration")
-
-# By calling with_duration on our clip, we fix the problem! We also need to set fps
-my_clip.with_duration(2).write_videofile("result.mp4", fps=1)
-
-
-
-

Note

-

A quite similar function is also available for AudioClip(), see write_audiofile().

-
-
-
-

Export a single frame of the clip#

-

As with previewing, sometimes you will need to export only one frame of a clip, for example to create the preview image of a video. You can do so with save_frame():

-
from moviepy import *
-
-# We load all the clips we want to compose
-myclip = VideoFileClip("example.mp4")
-myclip.save_frame("result.png", t=1)  # Save frame at 1 sec
-
-
-

For more info, see save_frame().

-
-
-

Animated GIFs#

-

In addition to writing video files, MoviePy also lets you write GIF files with write_gif():

-
from moviepy import *
-
-myclip = VideoFileClip("example.mp4").with_subclip(0, 2)
-
-# Here we just save as GIF
-myclip.write_gif("result.gif")
-
-# Here we save as GIF, but we set the FPS of our GIF at 10
-myclip.write_gif("result.gif", fps=10)
-
-
-

For more info, see write_gif().

-
-
-

Export all the clip as images in a directory#

-

Lastly, you may wish to export an entire clip as an image sequence (multiple images in one directory, one image per frame). You can do so with the function write_images_sequence():

-
from moviepy import *
-import os
-
-myclip = VideoFileClip("example.mp4")
-
-# Here we just save in the dir "output", with each filename being its frame index (starting at 0, then +1 per frame)
-os.mkdir("./output")
-myclip.write_images_sequence("./output/%d.jpg")
-
-# Same as above, but we left-pad the filenames with zeros up to 4 digits
-myclip.write_images_sequence("./output/%04d.jpg")
-
-
-

For more info, see write_images_sequence().

-
-
-
\ No newline at end of file diff --git a/v1.0.3/FAQ.html b/v1.0.3/FAQ.html new file mode 100644 index 000000000..0eeeddd4b --- /dev/null +++ b/v1.0.3/FAQ.html @@ -0,0 +1,253 @@

FAQ and troubleshooting

+

This section will fill up as MoviePy advances through the next steps of development (currently on the roadmap: MoviePy Studio, MoviePy WebApp, MoviePy OS, MoviePy Trust Inc., and the MoviePy Charity Foundation).

+
+

Common errors that are not bugs

+

These are very common errors which are not considered bugs to be solved (but you can still ask for that to change). If these answers don't work for you, please open a bug report on GitHub, on the dedicated forum on Reddit, or on the librelist.

+
+

MoviePy generated a video that cannot be read by my favorite player.

+

Known reason: one of the video's dimensions was not even, for instance 720x405, and you used an MPEG4 codec like libx264 (the default in MoviePy). In this case the generated video uses a format that is readable only by some players, like VLC.
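One workaround is to resize the clip to even dimensions before writing it (a sketch using the v1.x resize fx; the filenames and target size are illustrative):

+from moviepy.editor import VideoFileClip
+
+clip = VideoFileClip("myvideo.mp4")
+
+# 720x405 is rejected by libx264; bump the odd dimension to an even value
+clip = clip.resize(newsize=(720, 406))
+clip.write_videofile("fixed.mp4")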

+
+
+

I can’t seem to read any video with MoviePy

+

Known reason: you have a deprecated version of FFMPEG. Install a recent version from the website, not from your OS's repositories! (see Download and Installation).

+
+
+

Previewing videos makes them slower than they are

+

It means that your computer is not good enough to render the clip in real time. Don’t hesitate to play with the options of preview: for instance, lower the fps of the sound (11000 Hz is still fine) and the video. Also, downsizing your video with resize can help.

+
+
+
\ No newline at end of file diff --git a/_images/circle.gif b/v1.0.3/_images/circle.gif similarity index 100% rename from _images/circle.gif rename to v1.0.3/_images/circle.gif diff --git a/v1.0.3/_images/compo_from_image.jpeg b/v1.0.3/_images/compo_from_image.jpeg new file mode 100644 index 000000000..bb55873a3 Binary files /dev/null and b/v1.0.3/_images/compo_from_image.jpeg differ diff --git a/_images/demo_preview.jpeg b/v1.0.3/_images/demo_preview.jpeg similarity index 100% rename from _images/demo_preview.jpeg rename to v1.0.3/_images/demo_preview.jpeg diff --git a/_static/medias/user_guide/demo_preview.jpeg b/v1.0.3/_images/demo_preview1.jpeg similarity index 100% rename from _static/medias/user_guide/demo_preview.jpeg rename to v1.0.3/_images/demo_preview1.jpeg diff --git a/_images/explanations.jpeg b/v1.0.3/_images/explanations.jpeg similarity index 100% rename from _images/explanations.jpeg rename to v1.0.3/_images/explanations.jpeg diff --git a/_images/logo.png b/v1.0.3/_images/logo.png similarity index 100% rename from _images/logo.png rename to v1.0.3/_images/logo.png diff --git a/_images/stacked.jpeg b/v1.0.3/_images/stacked.jpeg similarity index 100% rename from _images/stacked.jpeg rename to v1.0.3/_images/stacked.jpeg diff --git a/_images/videoWH.jpeg b/v1.0.3/_images/videoWH.jpeg similarity index 100% rename from _images/videoWH.jpeg rename to v1.0.3/_images/videoWH.jpeg diff --git a/v1.0.3/_modules/index.html b/v1.0.3/_modules/index.html new file mode 100644 index 000000000..2e1bf871f --- /dev/null +++ b/v1.0.3/_modules/index.html @@ -0,0 +1,255 @@
\ No newline at end of file diff --git a/v1.0.3/_modules/moviepy/Clip.html b/v1.0.3/_modules/moviepy/Clip.html new file mode 100644 index 000000000..2d22b4c89 --- /dev/null +++ b/v1.0.3/_modules/moviepy/Clip.html @@ -0,0 +1,711 @@

Source code for moviepy.Clip

+"""
+This module implements the central object of MoviePy, the Clip, and
+all the methods that are common to the two subclasses of Clip, VideoClip
+and AudioClip.
+"""
+
+from copy import copy
+
+import numpy as np
+import proglog
+from tqdm import tqdm
+
+from moviepy.decorators import (apply_to_audio, apply_to_mask,
+                                convert_to_seconds, outplace,
+                                requires_duration, use_clip_fps_by_default)
+
+
+
[docs]class Clip: + + """ + + Base class of all clips (VideoClips and AudioClips). + + + Attributes + ----------- + + start: + When the clip is included in a composition, time of the + composition at which the clip starts playing (in seconds). + + end: + When the clip is included in a composition, time of the + composition at which the clip stops playing (in seconds). + + duration: + Duration of the clip (in seconds). Some clips are infinite, in + this case their duration will be ``None``. + + """ + + # prefix for all temporary video and audio files. + # You can overwrite it with + # >>> Clip._TEMP_FILES_PREFIX = "temp_" + + _TEMP_FILES_PREFIX = 'TEMP_MPY_' + + def __init__(self): + + self.start = 0 + self.end = None + self.duration = None + + self.memoize = False + self.memoized_t = None + self.memoize_frame = None + +
[docs] def copy(self): + """ Shallow copy of the clip. + + Returns a shallow copy of the clip whose mask and audio will + be shallow copies of the clip's mask and audio if they exist. + + This method is intensively used to produce new clips every time + there is an outplace transformation of the clip (clip.resize, + clip.subclip, etc.) + """ + + newclip = copy(self) + if hasattr(self, 'audio'): + newclip.audio = copy(self.audio) + if hasattr(self, 'mask'): + newclip.mask = copy(self.mask) + + return newclip
+ +
[docs] @convert_to_seconds(['t']) + def get_frame(self, t): + """ + Gets a numpy array representing the RGB picture of the clip at time t + or (mono or stereo) value for a sound clip + """ + # Coming soon: smart error handling for debugging at this point + if self.memoize: + if t == self.memoized_t: + return self.memoized_frame + else: + frame = self.make_frame(t) + self.memoized_t = t + self.memoized_frame = frame + return frame + else: + return self.make_frame(t)
+ +
[docs] def fl(self, fun, apply_to=None, keep_duration=True): + """ General processing of a clip. + + Returns a new Clip whose frames are a transformation + (through function ``fun``) of the frames of the current clip. + + Parameters + ----------- + + fun + A function with signature (gf,t -> frame) where ``gf`` will + represent the current clip's ``get_frame`` method, + i.e. ``gf`` is a function (t->image). Parameter `t` is a time + in seconds, `frame` is a picture (=Numpy array) which will be + returned by the transformed clip (see examples below). + + apply_to + Can be either ``'mask'``, or ``'audio'``, or + ``['mask','audio']``. + Specifies if the filter ``fl`` should also be applied to the + audio or the mask of the clip, if any. + + keep_duration + Set to True if the transformation does not change the + ``duration`` of the clip. + + Examples + -------- + + In the following ``newclip`` a 100 pixels-high clip whose video + content scrolls from the top to the bottom of the frames of + ``clip``. + + >>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :] + >>> newclip = clip.fl(fl, apply_to='mask') + + """ + if apply_to is None: + apply_to = [] + + #mf = copy(self.make_frame) + newclip = self.set_make_frame(lambda t: fun(self.get_frame, t)) + + if not keep_duration: + newclip.duration = None + newclip.end = None + + if isinstance(apply_to, str): + apply_to = [apply_to] + + for attr in apply_to: + a = getattr(newclip, attr, None) + if a is not None: + new_a = a.fl(fun, keep_duration=keep_duration) + setattr(newclip, attr, new_a) + + return newclip
+ +
[docs] def fl_time(self, t_func, apply_to=None, keep_duration=False): + """ + Returns a Clip instance playing the content of the current clip + but with a modified timeline, time ``t`` being replaced by another + time `t_func(t)`. + + Parameters + ----------- + + t_func: + A function ``t-> new_t`` + + apply_to: + Can be either 'mask', or 'audio', or ['mask','audio']. + Specifies if the filter ``fl`` should also be applied to the + audio or the mask of the clip, if any. + + keep_duration: + ``False`` (default) if the transformation modifies the + ``duration`` of the clip. + + Examples + -------- + + >>> # plays the clip (and its mask and sound) twice faster + >>> newclip = clip.fl_time(lambda: 2*t, apply_to=['mask', 'audio']) + >>> + >>> # plays the clip starting at t=3, and backwards: + >>> newclip = clip.fl_time(lambda: 3-t) + + """ + if apply_to is None: + apply_to = [] + + return self.fl(lambda gf, t: gf(t_func(t)), apply_to, + keep_duration=keep_duration)
+ +
[docs] def fx(self, func, *args, **kwargs): + """ + + Returns the result of ``func(self, *args, **kwargs)``. + for instance + + >>> newclip = clip.fx(resize, 0.2, method='bilinear') + + is equivalent to + + >>> newclip = resize(clip, 0.2, method='bilinear') + + The motivation of fx is to keep the name of the effect near its + parameters, when the effects are chained: + + >>> from moviepy.video.fx import volumex, resize, mirrorx + >>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx ) + >>> # Is equivalent, but clearer than + >>> resize( volumex( mirrorx( clip ), 0.5), 0.3) + + """ + + return func(self, *args, **kwargs)
+ + + +
[docs] @apply_to_mask + @apply_to_audio + @convert_to_seconds(['t']) + @outplace + def set_start(self, t, change_end=True): + """ + Returns a copy of the clip, with the ``start`` attribute set + to ``t``, which can be expressed in seconds (15.35), in (min, sec), + in (hour, min, sec), or as a string: '01:03:05.35'. + + + If ``change_end=True`` and the clip has a ``duration`` attribute, + the ``end`` atrribute of the clip will be updated to + ``start+duration``. + + If ``change_end=False`` and the clip has a ``end`` attribute, + the ``duration`` attribute of the clip will be updated to + ``end-start`` + + These changes are also applied to the ``audio`` and ``mask`` + clips of the current clip, if they exist. + """ + + self.start = t + if (self.duration is not None) and change_end: + self.end = t + self.duration + elif self.end is not None: + self.duration = self.end - self.start
+ + + +
[docs] @apply_to_mask + @apply_to_audio + @convert_to_seconds(['t']) + @outplace + def set_end(self, t): + """ + Returns a copy of the clip, with the ``end`` attribute set to + ``t``, which can be expressed in seconds (15.35), in (min, sec), + in (hour, min, sec), or as a string: '01:03:05.35'. + Also sets the duration of the mask and audio, if any, + of the returned clip. + """ + self.end = t + if self.end is None: return + if self.start is None: + if self.duration is not None: + self.start = max(0, t - newclip.duration) + else: + self.duration = self.end - self.start
+ + + +
[docs] @apply_to_mask + @apply_to_audio + @convert_to_seconds(['t']) + @outplace + def set_duration(self, t, change_end=True): + """ + Returns a copy of the clip, with the ``duration`` attribute + set to ``t``, which can be expressed in seconds (15.35), in (min, sec), + in (hour, min, sec), or as a string: '01:03:05.35'. + Also sets the duration of the mask and audio, if any, of the + returned clip. + If change_end is False, the start attribute of the clip will + be modified in function of the duration and the preset end + of the clip. + """ + self.duration = t + + if change_end: + self.end = None if (t is None) else (self.start + t) + else: + if self.duration is None: + raise Exception("Cannot change clip start when new" + "duration is None") + self.start = self.end - t
+ + +
[docs] @outplace + def set_make_frame(self, make_frame): + """ + Sets a ``make_frame`` attribute for the clip. Useful for setting + arbitrary/complicated videoclips. + """ + self.make_frame = make_frame
+ +
[docs] @outplace + def set_fps(self, fps): + """ Returns a copy of the clip with a new default fps for functions like + write_videofile, iterframe, etc. """ + self.fps = fps
+ + +
[docs] @outplace + def set_ismask(self, ismask): + """ Says wheter the clip is a mask or not (ismask is a boolean)""" + self.ismask = ismask
+ +
[docs] @outplace + def set_memoize(self, memoize): + """ Sets wheter the clip should keep the last frame read in memory """ + self.memoize = memoize
+ +
[docs] @convert_to_seconds(['t']) + def is_playing(self, t): + """ + + If t is a time, returns true if t is between the start and + the end of the clip. t can be expressed in seconds (15.35), + in (min, sec), in (hour, min, sec), or as a string: '01:03:05.35'. + If t is a numpy array, returns False if none of the t is in + theclip, else returns a vector [b_1, b_2, b_3...] where b_i + is true iff tti is in the clip. + """ + + if isinstance(t, np.ndarray): + # is the whole list of t outside the clip ? + tmin, tmax = t.min(), t.max() + + if (self.end is not None) and (tmin >= self.end): + return False + + if tmax < self.start: + return False + + # If we arrive here, a part of t falls in the clip + result = 1 * (t >= self.start) + if self.end is not None: + result *= (t <= self.end) + return result + + else: + + return((t >= self.start) and + ((self.end is None) or (t < self.end)))
+ + +
[docs] @convert_to_seconds(['t_start', 't_end']) + @apply_to_mask + @apply_to_audio + def subclip(self, t_start=0, t_end=None): + """ + Returns a clip playing the content of the current clip + between times ``t_start`` and ``t_end``, which can be expressed + in seconds (15.35), in (min, sec), in (hour, min, sec), or as a + string: '01:03:05.35'. + If ``t_end`` is not provided, it is assumed to be the duration + of the clip (potentially infinite). + If ``t_end`` is a negative value, it is reset to + ``clip.duration + t_end. ``. For instance: :: + + >>> # cut the last two seconds of the clip: + >>> newclip = clip.subclip(0,-2) + + If ``t_end`` is provided or if the clip has a duration attribute, + the duration of the returned clip is set automatically. + + The ``mask`` and ``audio`` of the resulting subclip will be + subclips of ``mask`` and ``audio`` the original clip, if + they exist. + """ + + if t_start < 0: + # Make this more Python-like, a negative value means to move + # backward from the end of the clip + t_start = self.duration + t_start # Remember t_start is negative + + if (self.duration is not None) and (t_start > self.duration): + raise ValueError("t_start (%.02f) " % t_start + + "should be smaller than the clip's " + + "duration (%.02f)." % self.duration) + + newclip = self.fl_time(lambda t: t + t_start, apply_to=[]) + + if (t_end is None) and (self.duration is not None): + + t_end = self.duration + + elif (t_end is not None) and (t_end < 0): + + if self.duration is None: + + print("Error: subclip with negative times (here %s)" % (str((t_start, t_end))) + + " can only be extracted from clips with a ``duration``") + + else: + + t_end = self.duration + t_end + + if t_end is not None: + + newclip.duration = t_end - t_start + newclip.end = newclip.start + newclip.duration + + return newclip
+ + +
[docs] @apply_to_mask + @apply_to_audio + @convert_to_seconds(['ta', 'tb']) + def cutout(self, ta, tb): + """ + Returns a clip playing the content of the current clip but + skips the extract between ``ta`` and ``tb``, which can be + expressed in seconds (15.35), in (min, sec), in (hour, min, sec), + or as a string: '01:03:05.35'. + If the original clip has a ``duration`` attribute set, + the duration of the returned clip is automatically computed as + `` duration - (tb - ta)``. + + The resulting clip's ``audio`` and ``mask`` will also be cutout + if they exist. + """ + + fl = lambda t: t + (t >= ta)*(tb - ta) + newclip = self.fl_time(fl) + + if self.duration is not None: + + return newclip.set_duration(self.duration - (tb - ta)) + + else: + + return newclip
+ +
[docs] @requires_duration + @use_clip_fps_by_default + def iter_frames(self, fps=None, with_times = False, logger=None, + dtype=None): + """ Iterates over all the frames of the clip. + + Returns each frame of the clip as a HxWxN np.array, + where N=1 for mask clips and N=3 for RGB clips. + + This function is not really meant for video editing. + It provides an easy way to do frame-by-frame treatment of + a video, for fields like science, computer vision... + + The ``fps`` (frames per second) parameter is optional if the + clip already has a ``fps`` attribute. + + Use dtype="uint8" when using the pictures to write video, images... + + Examples + --------- + + >>> # prints the maximum of red that is contained + >>> # on the first line of each frame of the clip. + >>> from moviepy.editor import VideoFileClip + >>> myclip = VideoFileClip('myvideo.mp4') + >>> print ( [frame[0,:,0].max() + for frame in myclip.iter_frames()]) + """ + logger = proglog.default_bar_logger(logger) + for t in logger.iter_bar(t=np.arange(0, self.duration, 1.0/fps)): + frame = self.get_frame(t) + if (dtype is not None) and (frame.dtype != dtype): + frame = frame.astype(dtype) + if with_times: + yield t, frame + else: + yield frame
+ +
[docs] def close(self): + """ + Release any resources that are in use. + """ + + # Implementation note for subclasses: + # + # * Memory-based resources can be left to the garbage-collector. + # * However, any open files should be closed, and subprocesses + # should be terminated. + # * Be wary that shallow copies are frequently used. + # Closing a Clip may affect its copies. + # * Therefore, should NOT be called by __del__(). + pass
+ + # Support the Context Manager protocol, to ensure that resources are cleaned up. + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close()
+
+ +
+ +
+
+ +
+ +
+

\ No newline at end of file diff --git a/v1.0.3/_modules/moviepy/audio/AudioClip.html b/v1.0.3/_modules/moviepy/audio/AudioClip.html new file mode 100644 index 000000000..a6670a71e --- /dev/null +++ b/v1.0.3/_modules/moviepy/audio/AudioClip.html @@ -0,0 +1,533 @@

Source code for moviepy.audio.AudioClip

+import os
+
+import numpy as np
+import proglog
+from tqdm import tqdm
+
+from moviepy.audio.io.ffmpeg_audiowriter import ffmpeg_audiowrite
+from moviepy.Clip import Clip
+from moviepy.decorators import requires_duration
+from moviepy.tools import deprecated_version_of, extensions_dict
+
+
+
[docs]class AudioClip(Clip): + """ Base class for audio clips. + + See ``AudioFileClip`` and ``CompositeSoundClip`` for usable classes. + + An AudioClip is a Clip with a ``make_frame`` attribute of + the form `` t -> [ f_t ]`` for mono sound and + ``t-> [ f1_t, f2_t ]`` for stereo sound (the arrays are Numpy arrays). + The `f_t` are floats between -1 and 1. These bounds can be + trespassed wihtout problems (the program will put the + sound back into the bounds at conversion time, without much impact). + + Parameters + ----------- + + make_frame + A function `t-> frame at time t`. The frame does not mean much + for a sound, it is just a float. What 'makes' the sound are + the variations of that float in the time. + + nchannels + Number of channels (one or two for mono or stereo). + + Examples + --------- + + >>> # Plays the note A (a sine wave of frequency 440HZ) + >>> import numpy as np + >>> make_frame = lambda t: 2*[ np.sin(440 * 2 * np.pi * t) ] + >>> clip = AudioClip(make_frame, duration=5) + >>> clip.preview() + + """ + + def __init__(self, make_frame=None, duration=None, fps=None): + Clip.__init__(self) + + if fps is not None: + self.fps = fps + + if make_frame is not None: + self.make_frame = make_frame + frame0 = self.get_frame(0) + if hasattr(frame0, '__iter__'): + self.nchannels = len(list(frame0)) + else: + self.nchannels = 1 + if duration is not None: + self.duration = duration + self.end = duration + +
[docs] @requires_duration + def iter_chunks(self, chunksize=None, chunk_duration=None, fps=None, + quantize=False, nbytes=2, logger=None): + """ Iterator that returns the whole sound array of the clip by chunks + """ + if fps is None: + fps = self.fps + logger = proglog.default_bar_logger(logger) + if chunk_duration is not None: + chunksize = int(chunk_duration*fps) + + totalsize = int(fps*self.duration) + + nchunks = totalsize // chunksize + 1 + + pospos = np.linspace(0, totalsize, nchunks + 1, endpoint=True, dtype=int) + + for i in logger.iter_bar(chunk=list(range(nchunks))): + size = pospos[i+1] - pospos[i] + assert(size <= chunksize) + tt = (1.0/fps)*np.arange(pospos[i], pospos[i+1]) + yield self.to_soundarray(tt, nbytes=nbytes, quantize=quantize, + fps=fps, buffersize=chunksize)
+ +
[docs] @requires_duration + def to_soundarray(self, tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000): + """ + Transforms the sound into an array that can be played by pygame + or written in a wav file. See ``AudioClip.preview``. + + Parameters + ------------ + + fps + Frame rate of the sound for the conversion. + 44100 for top quality. + + nbytes + Number of bytes to encode the sound: 1 for 8bit sound, + 2 for 16bit, 4 for 32bit sound. + + """ + if fps is None: + fps = self.fps + + stacker = np.vstack if self.nchannels == 2 else np.hstack + max_duration = 1.0 * buffersize / fps + if tt is None: + if self.duration > max_duration: + return stacker(self.iter_chunks(fps=fps, quantize=quantize, + nbytes=2, chunksize=buffersize)) + else: + tt = np.arange(0, self.duration, 1.0/fps) + """ + elif len(tt)> 1.5*buffersize: + nchunks = int(len(tt)/buffersize+1) + tt_chunks = np.array_split(tt, nchunks) + return stacker([self.to_soundarray(tt=ttc, buffersize=buffersize, fps=fps, + quantize=quantize, nbytes=nbytes) + for ttc in tt_chunks]) + """ + #print tt.max() - tt.min(), tt.min(), tt.max() + + snd_array = self.get_frame(tt) + + if quantize: + snd_array = np.maximum(-0.99, np.minimum(0.99, snd_array)) + inttype = {1: 'int8', 2: 'int16', 4: 'int32'}[nbytes] + snd_array = (2**(8*nbytes-1)*snd_array).astype(inttype) + + return snd_array
+
+    def max_volume(self, stereo=False, chunksize=50000, logger=None):
+        """Returns the maximum absolute volume of the clip (one value per
+        channel if ``stereo`` is True and the clip is stereo)."""
+        stereo = stereo and (self.nchannels == 2)
+
+        maxi = np.array([0, 0]) if stereo else 0
+        for chunk in self.iter_chunks(chunksize=chunksize, logger=logger):
+            maxi = (np.maximum(maxi, abs(chunk).max(axis=0)) if stereo
+                    else max(maxi, abs(chunk).max()))
+        return maxi
+    @requires_duration
+    def write_audiofile(self, filename, fps=None, nbytes=2, buffersize=2000,
+                        codec=None, bitrate=None, ffmpeg_params=None,
+                        write_logfile=False, verbose=True, logger='bar'):
+        """Writes an audio file from the AudioClip.
+
+        Parameters
+        -----------
+
+        filename
+          Name of the output file.
+
+        fps
+          Frames per second. If not set, it will default to ``self.fps``
+          if that is already set, otherwise to 44100.
+
+        nbytes
+          Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound).
+
+        codec
+          Which audio codec should be used. If None is provided, the codec
+          is determined based on the extension of the filename. Choose
+          'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav.
+
+        bitrate
+          Audio bitrate, given as a string like '50k', '500k', '3000k'.
+          Will determine the size and quality of the output file.
+          Note that it is mainly an indicative goal; the bitrate won't
+          necessarily be this value in the output file.
+
+        ffmpeg_params
+          Any additional parameters you would like to pass, as a list
+          of terms, like ['-option1', 'value1', '-option2', 'value2'].
+
+        write_logfile
+          If true, produces a detailed logfile named filename + '.log'
+          when writing the file.
+
+        verbose
+          Boolean indicating whether to print information.
+
+        logger
+          Either 'bar' or None or any Proglog logger.
+
+        """
+        if not fps:
+            if not self.fps:
+                fps = 44100
+            else:
+                fps = self.fps
+
+        if codec is None:
+            name, ext = os.path.splitext(os.path.basename(filename))
+            try:
+                codec = extensions_dict[ext[1:]]['codec'][0]
+            except KeyError:
+                raise ValueError("MoviePy couldn't find the codec associated "
+                                 "with the filename. Provide the 'codec' "
+                                 "parameter in write_audiofile.")
+
+        return ffmpeg_audiowrite(self, filename, fps, nbytes, buffersize,
+                                 codec=codec, bitrate=bitrate,
+                                 write_logfile=write_logfile, verbose=verbose,
+                                 ffmpeg_params=ffmpeg_params,
+                                 logger=logger)
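A usage sketch (editorial; the file names are placeholders):

    clip.write_audiofile("out.wav", fps=44100, codec='pcm_s16le')  # 16-bit wav
    clip.write_audiofile("out.mp3", bitrate='3000k')  # codec inferred from '.mp3'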
+
+
+# The to_audiofile method is replaced by the more explicit write_audiofile.
+AudioClip.to_audiofile = deprecated_version_of(AudioClip.write_audiofile,
+                                               'to_audiofile')
+
+
+class AudioArrayClip(AudioClip):
+    """
+    An audio clip made from a sound array.
+
+    Parameters
+    -----------
+
+    array
+      A Numpy array representing the sound, of size Nx1 for mono,
+      Nx2 for stereo.
+
+    fps
+      Frames per second: speed at which the sound is supposed to be
+      played.
+
+    """
+
+    def __init__(self, array, fps):
+
+        Clip.__init__(self)
+        self.array = array
+        self.fps = fps
+        self.duration = 1.0 * len(array) / fps
+
+        def make_frame(t):
+            """Complicated, but must be able to handle the case where ``t``
+            is a whole array of times, e.g. the result of an expression
+            like ``sin(t)``."""
+            if isinstance(t, np.ndarray):
+                array_inds = (self.fps * t).astype(int)
+                # >= 0 (not > 0), so the very first sample is not dropped
+                in_array = (array_inds >= 0) & (array_inds < len(self.array))
+                result = np.zeros((len(t), 2))
+                result[in_array] = self.array[array_inds[in_array]]
+                return result
+            else:
+                i = int(self.fps * t)
+                if i < 0 or i >= len(self.array):
+                    return 0 * self.array[0]
+                else:
+                    return self.array[i]
+
+        self.make_frame = make_frame
+        self.nchannels = len(list(self.get_frame(0)))
+
+
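A construction sketch for AudioArrayClip (editorial; builds one second of stereo sound under the documented Nx2 convention):

    import numpy as np
    from moviepy.audio.AudioClip import AudioArrayClip

    fps = 44100
    t = np.arange(0, 1, 1.0 / fps)
    wave = np.sin(440 * 2 * np.pi * t)
    samples = np.column_stack([wave, wave])  # Nx2 array: identical channels
    clip = AudioArrayClip(samples, fps=fps)  # duration = len(array) / fps = 1.0 s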
+class CompositeAudioClip(AudioClip):
+    """An audio clip made by composing (mixing) several audio clips.
+
+    Parameters
+    ------------
+
+    clips
+      List of audio clips, which may start playing at different times or
+      together. If all have their ``duration`` attribute set, the
+      duration of the composite clip is computed automatically.
+
+    """
+
+    def __init__(self, clips):
+
+        Clip.__init__(self)
+        self.clips = clips
+
+        ends = [c.end for c in self.clips]
+        self.nchannels = max(c.nchannels for c in self.clips)
+        if not any(e is None for e in ends):
+            self.duration = max(ends)
+            self.end = max(ends)
+
+        def make_frame(t):
+            # is_playing(t) is False, True, or (for an array t) a boolean
+            # mask of the times at which each clip plays.
+            played_parts = [c.is_playing(t) for c in self.clips]
+
+            sounds = [c.get_frame(t - c.start) * np.array([part]).T
+                      for c, part in zip(self.clips, played_parts)
+                      if (part is not False)]
+
+            if isinstance(t, np.ndarray):
+                zero = np.zeros((len(t), self.nchannels))
+            else:
+                zero = np.zeros(self.nchannels)
+
+            return zero + sum(sounds)
+
+        self.make_frame = make_frame
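A composition sketch (editorial; ``voice`` and ``music`` stand for any two audio clips, and ``set_start`` comes from the base ``Clip`` class):

    from moviepy.audio.AudioClip import CompositeAudioClip

    mix = CompositeAudioClip([voice, music.set_start(5)])  # music joins at t=5s
    mix.fps = 44100  # the composite has no fps of its own; set one before writing
    mix.write_audiofile("mix.wav")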
+
+
+def concatenate_audioclips(clips):
+    """Concatenates the given audio clips so that they play one after
+    another. The result clip's fps is the highest fps found among the
+    clips (clips without an fps are ignored for that purpose).
+    """
+    durations = [c.duration for c in clips]
+    tt = np.cumsum([0] + durations)  # start times, and end time.
+    newclips = [c.set_start(t) for c, t in zip(clips, tt)]
+
+    result = CompositeAudioClip(newclips).set_duration(tt[-1])
+
+    fpss = [c.fps for c in clips if getattr(c, 'fps', None)]
+    result.fps = max(fpss) if fpss else None
+    return result
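A quick sketch (editorial; ``intro`` and ``chorus`` are placeholder clips):

    from moviepy.audio.AudioClip import concatenate_audioclips

    song = concatenate_audioclips([intro, chorus])  # chorus starts when intro ends
    print(song.duration)  # sum of the two durations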
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/audio/fx/audio_fadein.html b/v1.0.3/_modules/moviepy/audio/fx/audio_fadein.html
new file mode 100644
index 000000000..57f798ac3
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/audio/fx/audio_fadein.html
@@ -0,0 +1,231 @@
+moviepy.audio.fx.audio_fadein — MoviePy documentation
Source code for moviepy.audio.fx.audio_fadein

+import numpy as np
+
+from moviepy.decorators import audio_video_fx
+
+
+
+@audio_video_fx
+def audio_fadein(clip, duration):
+    """Return an audio (or video) clip that is silent at first, with the
+    sound arriving progressively over ``duration`` seconds."""
+
+    def fading(gf, t):
+        gft = gf(t)
+
+        if np.isscalar(t):
+            factor = min(1.0 * t / duration, 1)
+            factor = np.array([factor, factor])
+        else:
+            factor = np.minimum(1.0 * t / duration, 1)
+            factor = np.vstack([factor, factor]).T
+        return factor * gft
+
+    return clip.fl(fading, keep_duration=True)
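A usage sketch (editorial; assumes the moviepy v1 ``editor`` API, which exposes the audio effects as ``afx``):

    from moviepy.editor import AudioFileClip, afx

    music = AudioFileClip("music.ogg")
    faded = music.fx(afx.audio_fadein, 3.0)  # silent at t=0, full volume from t=3s
    faded.write_audiofile("music_fadein.ogg")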
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/audio/fx/audio_fadeout.html b/v1.0.3/_modules/moviepy/audio/fx/audio_fadeout.html
new file mode 100644
index 000000000..c527e6657
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/audio/fx/audio_fadeout.html
@@ -0,0 +1,233 @@
+moviepy.audio.fx.audio_fadeout — MoviePy documentation
Source code for moviepy.audio.fx.audio_fadeout

+import numpy as np
+
+from moviepy.decorators import audio_video_fx, requires_duration
+
+
+
+@audio_video_fx
+@requires_duration
+def audio_fadeout(clip, duration):
+    """Return a sound clip where the sound fades out progressively
+    over ``duration`` seconds at the end of the clip."""
+
+    def fading(gf, t):
+        gft = gf(t)
+
+        if np.isscalar(t):
+            factor = min(1.0 * (clip.duration - t) / duration, 1)
+            factor = np.array([factor, factor])
+        else:
+            factor = np.minimum(1.0 * (clip.duration - t) / duration, 1)
+            factor = np.vstack([factor, factor]).T
+        return factor * gft
+
+    return clip.fl(fading, keep_duration=True)
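A usage sketch (editorial; same assumptions as the fade-in example above):

    from moviepy.editor import AudioFileClip, afx

    music = AudioFileClip("music.ogg")
    faded = music.fx(afx.audio_fadeout, 5.0)  # last 5 seconds fade to silence
    faded.write_audiofile("music_fadeout.ogg")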
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/audio/fx/audio_loop.html b/v1.0.3/_modules/moviepy/audio/fx/audio_loop.html
new file mode 100644
index 000000000..8c6aeaf04
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/audio/fx/audio_loop.html
@@ -0,0 +1,238 @@
+moviepy.audio.fx.audio_loop — MoviePy documentation
Source code for moviepy.audio.fx.audio_loop

+from ..AudioClip import concatenate_audioclips
+
+
+
+def audio_loop(audioclip, nloops=None, duration=None):
+    """Loops over an audio clip.
+
+    Returns an audio clip that plays the given clip either
+    `nloops` times, or during `duration` seconds.
+
+    Examples
+    ========
+
+    >>> from moviepy.editor import *
+    >>> videoclip = VideoFileClip('myvideo.mp4')
+    >>> music = AudioFileClip('music.ogg')
+    >>> audio = afx.audio_loop(music, duration=videoclip.duration)
+    >>> videoclip.set_audio(audio)
+
+    """
+    if duration is not None:
+        # Loop one time too many, then cut down to the exact duration.
+        nloops = int(duration / audioclip.duration) + 1
+        return concatenate_audioclips(nloops * [audioclip]).set_duration(duration)
+
+    else:
+        return concatenate_audioclips(nloops * [audioclip])
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/audio/fx/audio_normalize.html b/v1.0.3/_modules/moviepy/audio/fx/audio_normalize.html
new file mode 100644
index 000000000..498318da4
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/audio/fx/audio_normalize.html
@@ -0,0 +1,233 @@
+moviepy.audio.fx.audio_normalize — MoviePy documentation
Source code for moviepy.audio.fx.audio_normalize

+from moviepy.decorators import audio_video_fx
+
+from .volumex import volumex
+
+
+
+@audio_video_fx
+def audio_normalize(clip):
+    """Return a clip whose volume is normalized to 0 dB.
+
+    Return an audio (or video) clip whose audio volume is normalized
+    so that the maximum volume is at 0 dB, the maximum achievable volume.
+
+    Examples
+    ========
+
+    >>> from moviepy.editor import *
+    >>> videoclip = VideoFileClip('myvideo.mp4').fx(afx.audio_normalize)
+
+    """
+    mv = clip.max_volume()
+    return volumex(clip, 1 / mv)
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/audio/fx/volumex.html b/v1.0.3/_modules/moviepy/audio/fx/volumex.html
new file mode 100644
index 000000000..63e55a332
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/audio/fx/volumex.html
@@ -0,0 +1,230 @@
+moviepy.audio.fx.volumex — MoviePy documentation
Source code for moviepy.audio.fx.volumex

+from moviepy.decorators import audio_video_fx
+
+
+
+@audio_video_fx
+def volumex(clip, factor):
+    """Returns a clip with audio volume multiplied by the
+    value `factor`. Can be applied to both audio and video clips.
+
+    This effect is loaded as a clip method when you use moviepy.editor,
+    so you can just write ``clip.volumex(2)``.
+
+    Examples
+    ---------
+
+    >>> newclip = volumex(clip, 2.0)  # doubles audio volume
+    >>> newclip = clip.fx(volumex, 0.5)  # half audio, use with fx
+    >>> newclip = clip.volumex(2)  # only if you used "moviepy.editor"
+    """
+    return clip.fl(lambda gf, t: factor * gf(t),
+                   keep_duration=True)
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/audio/io/AudioFileClip.html b/v1.0.3/_modules/moviepy/audio/io/AudioFileClip.html
new file mode 100644
index 000000000..ba21fbdfd
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/audio/io/AudioFileClip.html
@@ -0,0 +1,300 @@
+moviepy.audio.io.AudioFileClip — MoviePy documentation
Source code for moviepy.audio.io.AudioFileClip

+from __future__ import division
+
+from moviepy.audio.AudioClip import AudioClip
+from moviepy.audio.io.readers import FFMPEG_AudioReader
+
+
+
+class AudioFileClip(AudioClip):
+    """
+    An audio clip read from a sound file, or an array.
+    The whole file is not loaded in memory. Instead, only a portion is
+    read and stored in memory. This portion includes frames before
+    and after the last frames read, so that it is fast to read the sound
+    backward and forward.
+
+    Parameters
+    ------------
+
+    filename
+      Either a soundfile name (of any extension supported by ffmpeg)
+      or an array representing a sound. If the soundfile is not a .wav,
+      it will be converted to .wav first, using the ``fps`` and
+      ``bitrate`` arguments.
+
+    buffersize
+      Size to load in memory (in number of frames).
+
+    Attributes
+    ------------
+
+    nbytes
+      Number of bits per frame of the original audio file.
+
+    fps
+      Number of frames per second in the audio file.
+
+    buffersize
+      See Parameters.
+
+    Lifetime
+    --------
+
+    Note that this creates subprocesses and locks files. If you construct
+    one of these instances, you must call close() afterwards, or the
+    subresources will not be cleaned up until the process ends.
+
+    If copies are made, and close() is called on one, it may cause methods
+    on the other copies to fail. Coreaders, however, must be closed
+    separately.
+
+    Examples
+    ----------
+
+    >>> snd = AudioFileClip("song.wav")
+    >>> snd.close()
+    >>> snd = AudioFileClip("song.mp3", fps=44100)
+    >>> second_reader = snd.coreader()
+    >>> second_reader.close()
+    >>> snd.close()
+    >>> with AudioFileClip(mySoundArray, fps=44100) as snd:  # from a numeric array
+    >>>     pass  # Close is implicitly performed by context manager.
+
+    """
+
+    def __init__(self, filename, buffersize=200000, nbytes=2, fps=44100):
+
+        AudioClip.__init__(self)
+
+        self.filename = filename
+        self.reader = FFMPEG_AudioReader(filename, fps=fps, nbytes=nbytes,
+                                         buffersize=buffersize)
+        self.fps = fps
+        self.duration = self.reader.duration
+        self.end = self.reader.duration
+        self.buffersize = self.reader.buffersize
+
+        self.make_frame = lambda t: self.reader.get_frame(t)
+        self.nchannels = self.reader.nchannels
+    def coreader(self):
+        """Returns a copy of the AudioFileClip, i.e. a new entrance point
+        to the audio file. Use copies when you have different clips
+        reading the audio file at different times."""
+        return AudioFileClip(self.filename, self.buffersize)
+ +
+    def close(self):
+        """Close the internal reader."""
+        if self.reader:
+            self.reader.close_proc()
+            self.reader = None
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/decorators.html b/v1.0.3/_modules/moviepy/decorators.html
new file mode 100644
index 000000000..7dee60223
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/decorators.html
@@ -0,0 +1,345 @@
+moviepy.decorators — MoviePy documentation
Source code for moviepy.decorators

+"""
+All decorators used in MoviePy live here.
+"""
+
+import decorator
+
+from moviepy.tools import cvsecs
+
+
+
+@decorator.decorator
+def outplace(f, clip, *a, **k):
+    """Applies ``f(clip.copy(), *a, **k)`` and returns the modified copy
+    of ``clip``, leaving the original untouched."""
+    newclip = clip.copy()
+    f(newclip, *a, **k)
+    return newclip
+ +
+@decorator.decorator
+def convert_masks_to_RGB(f, clip, *a, **k):
+    """If the clip is a mask, convert it to RGB before running the function."""
+    if clip.ismask:
+        clip = clip.to_RGB()
+    return f(clip, *a, **k)
+ +
+@decorator.decorator
+def apply_to_mask(f, clip, *a, **k):
+    """This decorator will apply the same function ``f`` to the mask of
+    the clip created with ``f``."""
+    newclip = f(clip, *a, **k)
+    if getattr(newclip, 'mask', None):
+        newclip.mask = f(newclip.mask, *a, **k)
+    return newclip
+ + + +
+@decorator.decorator
+def apply_to_audio(f, clip, *a, **k):
+    """This decorator will apply the function ``f`` to the audio of
+    the clip created with ``f``."""
+    newclip = f(clip, *a, **k)
+    if getattr(newclip, 'audio', None):
+        newclip.audio = f(newclip.audio, *a, **k)
+    return newclip
+ + +
+@decorator.decorator
+def requires_duration(f, clip, *a, **k):
+    """Raise an error if the clip has no duration."""
+    if clip.duration is None:
+        raise ValueError("Attribute 'duration' not set")
+    else:
+        return f(clip, *a, **k)
+ + + +
+@decorator.decorator
+def audio_video_fx(f, clip, *a, **k):
+    """Use an audio function on a video/audio clip.
+
+    This decorator tells that the function ``f`` (audioclip -> audioclip)
+    can also be used on a video clip, in which case it returns a
+    videoclip with unmodified video and modified audio.
+    """
+    if hasattr(clip, "audio"):
+        newclip = clip.copy()
+        if clip.audio is not None:
+            newclip.audio = f(clip.audio, *a, **k)
+        return newclip
+    else:
+        return f(clip, *a, **k)
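A sketch of defining a custom effect with this decorator (editorial; ``audio_invert`` is a made-up effect name, the pattern mirrors ``volumex`` shown earlier):

    from moviepy.decorators import audio_video_fx

    @audio_video_fx
    def audio_invert(clip):
        # Flips the waveform's sign. Works on an AudioClip directly, and on
        # a VideoClip (applied to clip.audio) thanks to the decorator.
        return clip.fl(lambda gf, t: -gf(t), keep_duration=True)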
+ +
+def preprocess_args(fun, varnames):
+    """Applies ``fun`` to the variables in ``varnames`` before launching
+    the function."""
+    def wrapper(f, *a, **kw):
+        if hasattr(f, "func_code"):
+            func_code = f.func_code  # Python 2
+        else:
+            func_code = f.__code__  # Python 3
+
+        names = func_code.co_varnames
+        new_a = [fun(arg) if (name in varnames) else arg
+                 for (arg, name) in zip(a, names)]
+        new_kw = {k: fun(v) if k in varnames else v
+                  for (k, v) in kw.items()}
+        return f(*new_a, **new_kw)
+    return decorator.decorator(wrapper)
+ + +
+def convert_to_seconds(varnames):
+    """Converts the specified variables to seconds."""
+    return preprocess_args(cvsecs, varnames)
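A sketch of how ``preprocess_args`` and ``convert_to_seconds`` combine (editorial; ``print_cut`` and ``some_clip`` are hypothetical, and ``cvsecs`` is defined on the moviepy.tools page below):

    from moviepy.decorators import convert_to_seconds

    @convert_to_seconds(['t'])
    def print_cut(clip, t):
        print(t)  # always a float number of seconds

    print_cut(some_clip, '01:30')  # prints 90.0
    print_cut(some_clip, (2, 15))  # (min, sec) tuple, prints 135.0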
+ + + +
+@decorator.decorator
+def add_mask_if_none(f, clip, *a, **k):
+    """Add a mask to the clip if there is none."""
+    if clip.mask is None:
+        clip = clip.add_mask()
+    return f(clip, *a, **k)
+ + + +
+@decorator.decorator
+def use_clip_fps_by_default(f, clip, *a, **k):
+    """Will use clip.fps if no fps=... is provided in **k."""
+
+    def fun(fps):
+        if fps is not None:
+            return fps
+        elif getattr(clip, 'fps', None):
+            return clip.fps
+        raise AttributeError("No 'fps' (frames per second) attribute specified"
+                             " for function %s and the clip has no 'fps' attribute. Either"
+                             " provide e.g. fps=24 in the arguments of the function, or define"
+                             " the clip's fps with `clip.fps=24`" % f.__name__)
+
+    if hasattr(f, "func_code"):
+        func_code = f.func_code  # Python 2
+    else:
+        func_code = f.__code__  # Python 3
+
+    names = func_code.co_varnames[1:]
+
+    new_a = [fun(arg) if (name == 'fps') else arg
+             for (arg, name) in zip(a, names)]
+    new_kw = {k: fun(v) if k == 'fps' else v
+              for (k, v) in k.items()}
+
+    return f(clip, *new_a, **new_kw)
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/tools.html b/v1.0.3/_modules/moviepy/tools.html
new file mode 100644
index 000000000..c4a5cd26d
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/tools.html
@@ -0,0 +1,387 @@
+moviepy.tools — MoviePy documentation
Source code for moviepy.tools

+"""
+Miscellaneous useful functions used in many places in the program.
+"""
+import os
+import subprocess as sp
+import sys
+import warnings
+
+import proglog
+
+from .compat import DEVNULL
+
+
+
+def sys_write_flush(s):
+    """ Writes and flushes without delay a text in the console """
+    # Reason for not using `print` is that in some consoles "print" 
+    # commands get delayed, while stdout.flush are instantaneous, 
+    # so this method is better at providing feedback.
+    # See https://github.com/Zulko/moviepy/pull/485
+    sys.stdout.write(s)
+    sys.stdout.flush()
+
+
+def verbose_print(verbose, s):
+    """ Only prints s (with sys_write_flush) if verbose is True."""
+    if verbose:
+        sys_write_flush(s)
+
+
+def subprocess_call(cmd, logger='bar', errorprint=True):
+    """ Executes the given subprocess command.
+    
+    Set logger to None or a custom Proglog logger to avoid printings.
+    """
+    logger = proglog.default_bar_logger(logger)
+    logger(message='Moviepy - Running:\n>>> ' + ' '.join(cmd))
+
+    popen_params = {"stdout": DEVNULL,
+                    "stderr": sp.PIPE,
+                    "stdin": DEVNULL}
+
+    if os.name == "nt":
+        popen_params["creationflags"] = 0x08000000
+
+    proc = sp.Popen(cmd, **popen_params)
+
+    out, err = proc.communicate() # proc.wait()
+    proc.stderr.close()
+
+    if proc.returncode:
+        if errorprint:
+            logger(message='Moviepy - Command returned an error')
+        raise IOError(err.decode('utf8'))
+    else:
+        logger(message='Moviepy - Command successful')
+
+    del proc
+
+def is_string(obj):
+    """ Returns true if s is string or string-like object,
+    compatible with Python 2 and Python 3."""
+    try:
+        return isinstance(obj, basestring)
+    except NameError:
+        return isinstance(obj, str)
+
+
+def cvsecs(time):
+    """ Will convert any time into seconds. 
+    
+    If the type of `time` is not valid, 
+    it's returned as is. 
+
+    Here are the accepted formats::
+
+    >>> cvsecs(15.4)   # seconds 
+    15.4 
+    >>> cvsecs((1, 21.5))   # (min,sec) 
+    81.5 
+    >>> cvsecs((1, 1, 2))   # (hr, min, sec)  
+    3662  
+    >>> cvsecs('01:01:33.045') 
+    3693.045
+    >>> cvsecs('01:01:33,5')    # comma works too
+    3693.5
+    >>> cvsecs('1:33,5')    # only minutes and secs
+    99.5
+    >>> cvsecs('33.5')      # only secs
+    33.5
+    """
+    factors = (1, 60, 3600)
+    
+    if is_string(time):     
+        time = [float(f.replace(',', '.')) for f in time.split(':')]
+
+    if not isinstance(time, (tuple, list)):
+        return time
+
+    return sum(mult * part for mult, part in zip(factors, reversed(time)))
+
+
+def deprecated_version_of(f, oldname, newname=None):
+    """ Indicates that a function is deprecated and has a new name.
+
+    `f` is the new function, `oldname` the name of the deprecated
+    function, `newname` the name of `f`, which can be automatically
+    found.
+
+    Returns
+    ========
+
+    f_deprecated
+      A function that does the same thing as f, but with a docstring
+      and a printed message on call which say that the function is
+      deprecated and that you should use f instead.
+
+    Examples
+    =========
+
+    >>> # The badly named method 'to_file' is replaced by 'write_file'
+    >>> class Clip:
+    >>>    def write_file(self, *args):
+    >>>        # blablabla
+    >>>
+    >>> Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file')
+    """
+
+    if newname is None: newname = f.__name__
+
+    warning= ("The function ``%s`` is deprecated and is kept temporarily "
+              "for backwards compatibility.\nPlease use the new name, "
+              "``%s``, instead.")%(oldname, newname)
+
+    def fdepr(*a, **kw):
+        warnings.warn("MoviePy: " + warning, PendingDeprecationWarning)
+        return f(*a, **kw)
+    fdepr.__doc__ = warning
+
+    return fdepr
+
+
+# Non-exhaustive dictionary to store default information.
+# Any addition is most welcome.
+# Note that 'gif' is complicated to place. From a VideoFileClip point of view,
+# it is a video, but from a HTML5 point of view, it is an image.
+
+extensions_dict = { "mp4":  {'type':'video', 'codec':['libx264','libmpeg4', 'aac']},
+                    'ogv':  {'type':'video', 'codec':['libtheora']},
+                    'webm': {'type':'video', 'codec':['libvpx']},
+                    'avi':  {'type':'video'},
+                    'mov':  {'type':'video'},
+
+                    'ogg':  {'type':'audio', 'codec':['libvorbis']},
+                    'mp3':  {'type':'audio', 'codec':['libmp3lame']},
+                    'wav':  {'type':'audio', 'codec':['pcm_s16le', 'pcm_s24le', 'pcm_s32le']},
+                    'm4a':  {'type':'audio', 'codec':['libfdk_aac']}
+                  }
+
+for ext in ["jpg", "jpeg", "png", "bmp", "tiff"]:
+    extensions_dict[ext] = {'type':'image'}
+
+
+def find_extension(codec):
+    """Returns the file extension most commonly associated with the given
+    codec, looked up in ``extensions_dict``."""
+    if codec in extensions_dict:
+        # codec is already the extension
+        return codec
+
+    for ext,infos in extensions_dict.items():
+        if codec in infos.get('codec', []):
+            return ext
+    raise ValueError(
+        "The audio_codec you chose is unknown by MoviePy. "
+        "You should report this. In the meantime, you can "
+        "specify a temp_audiofile with the right extension "
+        "in write_videofile."
+    )
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/VideoClip.html b/v1.0.3/_modules/moviepy/video/VideoClip.html
new file mode 100644
index 000000000..109cb6c8e
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/VideoClip.html
@@ -0,0 +1,1405 @@
+moviepy.video.VideoClip — MoviePy documentation
Source code for moviepy.video.VideoClip

+"""
+This module implements VideoClip (base class for video clips) and its
+main subclasses:
+- Animated clips:     VideoFileClip, ImageSequenceClip
+- Static image clips: ImageClip, ColorClip, TextClip
+"""
+import os
+import subprocess as sp
+import tempfile
+import warnings
+
+import numpy as np
+import proglog
+from imageio import imread, imsave
+
+from ..Clip import Clip
+from ..compat import DEVNULL, string_types
+from ..config import get_setting
+from ..decorators import (add_mask_if_none, apply_to_mask,
+                          convert_masks_to_RGB, convert_to_seconds, outplace,
+                          requires_duration, use_clip_fps_by_default)
+from ..tools import (deprecated_version_of, extensions_dict, find_extension,
+                     is_string, subprocess_call)
+from .io.ffmpeg_writer import ffmpeg_write_video
+from .io.gif_writers import (write_gif, write_gif_with_image_io,
+                             write_gif_with_tempfiles)
+from .tools.drawing import blit
+
+
+
+class VideoClip(Clip):
+    """Base class for video clips.
+
+    See ``VideoFileClip``, ``ImageClip`` etc. for more user-friendly
+    classes.
+
+
+    Parameters
+    -----------
+
+    ismask
+      `True` if the clip is going to be used as a mask.
+
+
+    Attributes
+    ----------
+
+    size
+      The size of the clip, (width, height), in pixels.
+
+    w, h
+      The width and height of the clip, in pixels.
+
+    ismask
+      Boolean set to `True` if the clip is a mask.
+
+    make_frame
+      A function ``t -> frame at time t`` where ``frame`` is a
+      w*h*3 RGB array.
+
+    mask (default None)
+      VideoClip mask attached to this clip. If mask is ``None``,
+      the video clip is fully opaque.
+
+    audio (default None)
+      An AudioClip instance containing the audio of the video clip.
+
+    pos
+      A function ``t -> (x, y)`` where ``x, y`` is the position
+      of the clip when it is composed with other clips.
+      See ``VideoClip.set_pos`` for more details.
+
+    relative_pos
+      See variable ``pos``.
+
+    """
+
+    def __init__(self, make_frame=None, ismask=False, duration=None,
+                 has_constant_size=True):
+        Clip.__init__(self)
+        self.mask = None
+        self.audio = None
+        self.pos = lambda t: (0, 0)
+        self.relative_pos = False
+        if make_frame:
+            self.make_frame = make_frame
+            self.size = self.get_frame(0).shape[:2][::-1]
+        self.ismask = ismask
+        self.has_constant_size = has_constant_size
+        if duration is not None:
+            self.duration = duration
+            self.end = duration
+
+    @property
+    def w(self):
+        return self.size[0]
+
+    @property
+    def h(self):
+        return self.size[1]
+
+    @property
+    def aspect_ratio(self):
+        return self.w / float(self.h)
+
+    # ===============================================================
+    # EXPORT OPERATIONS
[docs] @convert_to_seconds(['t']) + @convert_masks_to_RGB + def save_frame(self, filename, t=0, withmask=True): + """ Save a clip's frame to an image file. + + Saves the frame of clip corresponding to time ``t`` in + 'filename'. ``t`` can be expressed in seconds (15.35), in + (min, sec), in (hour, min, sec), or as a string: '01:03:05.35'. + + If ``withmask`` is ``True`` the mask is saved in + the alpha layer of the picture (only works with PNGs). + + """ + + im = self.get_frame(t) + if withmask and self.mask is not None: + mask = 255 * self.mask.get_frame(t) + im = np.dstack([im, mask]).astype('uint8') + else: + im = im.astype("uint8") + + imsave(filename, im)
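A quick sketch (editorial; file names are placeholders):

    clip.save_frame("frame.png", t='00:00:02.5')  # t given as a string
    clip.save_frame("frame.jpg", t=2.5, withmask=False)  # mask only works with PNGs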
+ +
+    @requires_duration
+    @use_clip_fps_by_default
+    @convert_masks_to_RGB
+    def write_videofile(self, filename, fps=None, codec=None,
+                        bitrate=None, audio=True, audio_fps=44100,
+                        preset="medium",
+                        audio_nbytes=4, audio_codec=None,
+                        audio_bitrate=None, audio_bufsize=2000,
+                        temp_audiofile=None,
+                        rewrite_audio=True, remove_temp=True,
+                        write_logfile=False, verbose=True,
+                        threads=None, ffmpeg_params=None,
+                        logger='bar'):
+        """Write the clip to a videofile.
+
+        Parameters
+        -----------
+
+        filename
+          Name of the video file to write in.
+          The extension must correspond to the "codec" used (see below),
+          or simply be '.avi' (which will work with any codec).
+
+        fps
+          Number of frames per second in the resulting video file. If None is
+          provided, and the clip has an fps attribute, this fps will be used.
+
+        codec
+          Codec to use for image encoding. Can be any codec supported
+          by ffmpeg. If the filename has extension '.mp4', '.ogv' or '.webm',
+          the codec will be set accordingly, but you can still set it if you
+          don't like the default. For other extensions, the output filename
+          must be set accordingly.
+
+          Some examples of codecs are:
+
+          ``'libx264'`` (default codec for file extension ``.mp4``)
+          makes well-compressed videos (quality tunable using 'bitrate').
+
+          ``'mpeg4'`` (other codec for extension ``.mp4``) can be an
+          alternative to ``'libx264'``, and produces higher quality videos
+          by default.
+
+          ``'rawvideo'`` (use file extension ``.avi``) will produce
+          a video of perfect quality, of possibly very huge size.
+
+          ``png`` (use file extension ``.avi``) will produce a video
+          of perfect quality, of smaller size than with ``rawvideo``.
+
+          ``'libvorbis'`` (use file extension ``.ogv``) is a nice video
+          format, which is completely free/open source. However not
+          everyone has the codecs installed by default on their machine.
+
+          ``'libvpx'`` (use file extension ``.webm``) is a tiny video
+          format well suited for web videos (with HTML5). Open source.
+
+        audio
+          Either ``True``, ``False``, or a file name.
+          If ``True`` and the clip has an audio clip attached, this
+          audio clip will be incorporated as a soundtrack in the movie.
+          If ``audio`` is the name of an audio file, this audio file
+          will be incorporated as a soundtrack in the movie.
+
+        audio_fps
+          Frame rate to use when generating the sound.
+
+        temp_audiofile
+          The name of the temporary audiofile to be generated and
+          incorporated in the movie, if any.
+
+        audio_codec
+          Which audio codec should be used. Examples are 'libmp3lame'
+          for '.mp3', 'libvorbis' for 'ogg', 'libfdk_aac':'m4a',
+          'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav.
+          Default is 'libmp3lame', unless the video extension is 'ogv'
+          or 'webm', in which case the default is 'libvorbis'.
+
+        audio_bitrate
+          Audio bitrate, given as a string like '50k', '500k', '3000k'.
+          Will determine the size/quality of audio in the output file.
+          Note that it is mainly an indicative goal; the bitrate won't
+          necessarily be this value in the final file.
+
+        preset
+          Sets the time that FFMPEG will spend optimizing the compression.
+          Choices are: ultrafast, superfast, veryfast, faster, fast, medium,
+          slow, slower, veryslow, placebo. Note that this does not impact
+          the quality of the video, only the size of the video file. So
+          choose ultrafast when you are in a hurry and file size does not
+          matter.
+
+        threads
+          Number of threads to use for ffmpeg. Can speed up the writing of
+          the video on multicore computers.
+
+        ffmpeg_params
+          Any additional ffmpeg parameters you would like to pass, as a list
+          of terms, like ['-option1', 'value1', '-option2', 'value2'].
+
+        write_logfile
+          If true, will write log files for the audio and the video.
+          These will be files ending with '.log' with the name of the
+          output file in them.
+
+        logger
+          Either "bar" for progress bar or None or any Proglog logger.
+
+        verbose (deprecated, kept for compatibility)
+          Formerly used for toggling messages on/off. Use logger=None now.
+
+        Examples
+        ========
+
+        >>> from moviepy.editor import VideoFileClip
+        >>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
+        >>> clip.write_videofile("my_new_video.mp4")
+        >>> clip.close()
+
+        """
+        name, ext = os.path.splitext(os.path.basename(filename))
+        ext = ext[1:].lower()
+        logger = proglog.default_bar_logger(logger)
+
+        if codec is None:
+            try:
+                codec = extensions_dict[ext]['codec'][0]
+            except KeyError:
+                raise ValueError("MoviePy couldn't find the codec associated "
+                                 "with the filename. Provide the 'codec' "
+                                 "parameter in write_videofile.")
+
+        if audio_codec is None:
+            if ext in ['ogv', 'webm']:
+                audio_codec = 'libvorbis'
+            else:
+                audio_codec = 'libmp3lame'
+        elif audio_codec == 'raw16':
+            audio_codec = 'pcm_s16le'
+        elif audio_codec == 'raw32':
+            audio_codec = 'pcm_s32le'
+
+        audiofile = audio if is_string(audio) else None
+        make_audio = ((audiofile is None) and (audio == True) and
+                      (self.audio is not None))
+
+        if make_audio and temp_audiofile:
+            # The audio will be the clip's audio
+            audiofile = temp_audiofile
+        elif make_audio:
+            audio_ext = find_extension(audio_codec)
+            audiofile = (name + Clip._TEMP_FILES_PREFIX + "wvf_snd.%s" % audio_ext)
+
+        # enough cpu for multiprocessing ? USELESS RIGHT NOW, WILL COME AGAIN
+        # enough_cpu = (multiprocessing.cpu_count() > 1)
+        logger(message="Moviepy - Building video %s." % filename)
+        if make_audio:
+            self.audio.write_audiofile(audiofile, audio_fps,
+                                       audio_nbytes, audio_bufsize,
+                                       audio_codec, bitrate=audio_bitrate,
+                                       write_logfile=write_logfile,
+                                       verbose=verbose,
+                                       logger=logger)
+
+        ffmpeg_write_video(self, filename, fps, codec,
+                           bitrate=bitrate,
+                           preset=preset,
+                           write_logfile=write_logfile,
+                           audiofile=audiofile,
+                           verbose=verbose, threads=threads,
+                           ffmpeg_params=ffmpeg_params,
+                           logger=logger)
+
+        if remove_temp and make_audio:
+            if os.path.exists(audiofile):
+                os.remove(audiofile)
+        logger(message="Moviepy - video ready %s" % filename)
+ +
+    @requires_duration
+    @use_clip_fps_by_default
+    @convert_masks_to_RGB
+    def write_images_sequence(self, nameformat, fps=None, verbose=True,
+                              withmask=True, logger='bar'):
+        """Writes the videoclip to a sequence of image files.
+
+        Parameters
+        -----------
+
+        nameformat
+          A filename specifying the numbering format and extension
+          of the pictures. For instance "frame%03d.png" for filenames
+          indexed with 3 digits and PNG format. Also possible:
+          "some_folder/frame%04d.jpeg", etc.
+
+        fps
+          Number of frames per second to consider when writing the
+          clip. If not specified, the clip's ``fps`` attribute will
+          be used if it has one.
+
+        withmask
+          Will save the clip's mask (if any) as an alpha channel (PNGs only).
+
+        verbose
+          Boolean indicating whether to print information.
+
+        logger
+          Either 'bar' (progress bar) or None or any Proglog logger.
+
+
+        Returns
+        --------
+
+        names_list
+          A list of all the files generated.
+
+        Notes
+        ------
+
+        The resulting image sequence can be read using e.g. the class
+        ``ImageSequenceClip``.
+
+        """
+        logger = proglog.default_bar_logger(logger)
+        logger(message='Moviepy - Writing frames %s.' % nameformat)
+
+        tt = np.arange(0, self.duration, 1.0 / fps)
+
+        filenames = []
+        for i, t in logger.iter_bar(t=list(enumerate(tt))):
+            name = nameformat % i
+            filenames.append(name)
+            self.save_frame(name, t, withmask=withmask)
+        logger(message='Moviepy - Done writing frames %s.' % nameformat)
+
+        return filenames
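A usage sketch (editorial; assumes the target folder already exists):

    files = clip.write_images_sequence("frames/frame%04d.png", fps=24)
    print(len(files))  # one PNG per frame at 24 fps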
+ +
+    @requires_duration
+    @convert_masks_to_RGB
+    def write_gif(self, filename, fps=None, program='imageio',
+                  opt='nq', fuzz=1, verbose=True,
+                  loop=0, dispose=False, colors=None, tempfiles=False,
+                  logger='bar'):
+        """Write the VideoClip to a GIF file.
+
+        Converts a VideoClip into an animated GIF using ImageMagick
+        or ffmpeg.
+
+        Parameters
+        -----------
+
+        filename
+          Name of the resulting gif file.
+
+        fps
+          Number of frames per second (see note below). If it
+          isn't provided, then the function will look for the clip's
+          ``fps`` attribute (VideoFileClip, for instance, have one).
+
+        program
+          Software to use for the conversion, either 'imageio' (this will use
+          the library FreeImage through ImageIO), or 'ImageMagick', or 'ffmpeg'.
+
+        opt
+          Optimization to apply. If program='imageio', opt must be either 'wu'
+          (Wu) or 'nq' (Neuquant). If program='ImageMagick',
+          either 'optimizeplus' or 'OptimizeTransparency'.
+
+        fuzz
+          (ImageMagick only) Compresses the GIF by considering that
+          the colors that are less than fuzz% different are in fact
+          the same.
+
+        tempfiles
+          Writes every frame to a file instead of passing them in the RAM.
+          Useful on computers with little RAM. Can only be used with
+          'ImageMagick' or 'ffmpeg'.
+
+        progress_bar
+          If True, displays a progress bar.
+
+
+        Notes
+        -----
+
+        The gif will be playing the clip in real time (you can
+        only change the frame rate). If you want the gif to be played
+        slower than the clip you can use ::
+
+            >>> # slow down clip 50% and make it a gif
+            >>> myClip.speedx(0.5).to_gif('myClip.gif')
+
+        """
+        # A little sketchy at the moment, maybe move all that in write_gif,
+        # refactor a little... we will see.
+
+        if program == 'imageio':
+            write_gif_with_image_io(self, filename, fps=fps, opt=opt, loop=loop,
+                                    verbose=verbose, colors=colors,
+                                    logger=logger)
+        elif tempfiles:
+            # convert imageio opt variable to something that can be used with
+            # ImageMagick
+            opt = 'optimizeplus' if opt == 'nq' else 'OptimizeTransparency'
+            write_gif_with_tempfiles(self, filename, fps=fps,
+                                     program=program, opt=opt, fuzz=fuzz,
+                                     verbose=verbose, loop=loop,
+                                     dispose=dispose, colors=colors,
+                                     logger=logger)
+        else:
+            # convert imageio opt variable to something that can be used with
+            # ImageMagick
+            opt = 'optimizeplus' if opt == 'nq' else 'OptimizeTransparency'
+            write_gif(self, filename, fps=fps, program=program,
+                      opt=opt, fuzz=fuzz, verbose=verbose, loop=loop,
+                      dispose=dispose, colors=colors,
+                      logger=logger)
+ + # ----------------------------------------------------------------- + # F I L T E R I N G + +
+    def subfx(self, fx, ta=0, tb=None, **kwargs):
+        """Apply a transformation to a part of the clip.
+
+        Returns a new clip in which the function ``fx`` (clip -> clip)
+        has been applied to the subclip between times `ta` and `tb`
+        (in seconds).
+
+        Examples
+        ---------
+
+        >>> # The scene between times t=3s and t=6s in ``clip`` will be
+        >>> # played twice slower in ``newclip``
+        >>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)
+
+        """
+        left = self.subclip(0, ta) if ta else None
+        center = self.subclip(ta, tb).fx(fx, **kwargs)
+        right = self.subclip(t_start=tb) if tb else None
+
+        clips = [c for c in (left, center, right) if c]
+
+        # beurk, have to find other solution
+        from moviepy.video.compositing.concatenate import concatenate_videoclips
+
+        return concatenate_videoclips(clips).set_start(self.start)
+ + # IMAGE FILTERS + +
[docs] def fl_image(self, image_func, apply_to=None): + """ + Modifies the images of a clip by replacing the frame + `get_frame(t)` by another frame, `image_func(get_frame(t))` + """ + apply_to = apply_to or [] + return self.fl(lambda gf, t: image_func(gf(t)), apply_to)
+ + # -------------------------------------------------------------- + # C O M P O S I T I N G + + def fill_array(self, pre_array, shape=(0, 0)): + pre_shape = pre_array.shape + dx = shape[0] - pre_shape[0] + dy = shape[1] - pre_shape[1] + post_array = pre_array + if dx < 0: + post_array = pre_array[:shape[0]] + elif dx > 0: + x_1 = [[[1, 1, 1]] * pre_shape[1]] * dx + post_array = np.vstack((pre_array, x_1)) + if dy < 0: + post_array = post_array[:, :shape[1]] + elif dy > 0: + x_1 = [[[1, 1, 1]] * dy] * post_array.shape[0] + post_array = np.hstack((post_array, x_1)) + return post_array + +
[docs] def blit_on(self, picture, t): + """ + Returns the result of the blit of the clip's frame at time `t` + on the given `picture`, the position of the clip being given + by the clip's ``pos`` attribute. Meant for compositing. + """ + hf, wf = framesize = picture.shape[:2] + + if self.ismask and picture.max(): + return np.minimum(1, picture + self.blit_on(np.zeros(framesize), t)) + + ct = t - self.start # clip time + + # GET IMAGE AND MASK IF ANY + + img = self.get_frame(ct) + mask = self.mask.get_frame(ct) if self.mask else None + + if mask is not None and ((img.shape[0] != mask.shape[0]) or (img.shape[1] != mask.shape[1])): + img = self.fill_array(img, mask.shape) + + hi, wi = img.shape[:2] + + # SET POSITION + pos = self.pos(ct) + + # preprocess short writings of the position + if isinstance(pos, str): + pos = {'center': ['center', 'center'], + 'left': ['left', 'center'], + 'right': ['right', 'center'], + 'top': ['center', 'top'], + 'bottom': ['center', 'bottom']}[pos] + else: + pos = list(pos) + + # is the position relative (given in % of the clip's size) ? + if self.relative_pos: + for i, dim in enumerate([wf, hf]): + if not isinstance(pos[i], str): + pos[i] = dim * pos[i] + + if isinstance(pos[0], str): + D = {'left': 0, 'center': (wf - wi) / 2, 'right': wf - wi} + pos[0] = D[pos[0]] + + if isinstance(pos[1], str): + D = {'top': 0, 'center': (hf - hi) / 2, 'bottom': hf - hi} + pos[1] = D[pos[1]] + + pos = map(int, pos) + + return blit(img, picture, pos, mask=mask, ismask=self.ismask)
+ +
+    def add_mask(self):
+        """Add a mask VideoClip to the VideoClip.
+
+        Returns a copy of the clip with a completely opaque mask
+        (made of ones). This makes computations slower compared to
+        having a None mask but can be useful in many cases.
+
+        Set ``has_constant_size`` to `False` for clips with a moving
+        image size.
+        """
+        if self.has_constant_size:
+            mask = ColorClip(self.size, 1.0, ismask=True)
+            return self.set_mask(mask.set_duration(self.duration))
+        else:
+            make_frame = lambda t: np.ones(self.get_frame(t).shape[:2], dtype=float)
+            mask = VideoClip(ismask=True, make_frame=make_frame)
+            return self.set_mask(mask.set_duration(self.duration))
+ +
[docs] def on_color(self, size=None, color=(0, 0, 0), pos=None, + col_opacity=None): + """Place the clip on a colored background. + + Returns a clip made of the current clip overlaid on a color + clip of a possibly bigger size. Can serve to flatten transparent + clips. + + Parameters + ----------- + + size + Size (width, height) in pixels of the final clip. + By default it will be the size of the current clip. + + color + Background color of the final clip ([R,G,B]). + + pos + Position of the clip in the final clip. 'center' is the default + + col_opacity + Parameter in 0..1 indicating the opacity of the colored + background. + + """ + from .compositing.CompositeVideoClip import CompositeVideoClip + + if size is None: + size = self.size + if pos is None: + pos = 'center' + colorclip = ColorClip(size, color=color) + + if col_opacity is not None: + colorclip = (ColorClip(size, color=color, duration=self.duration) + .set_opacity(col_opacity)) + result = CompositeVideoClip([colorclip, self.set_position(pos)]) + else: + result = CompositeVideoClip([self.set_position(pos)], + size=size, + bg_color=color) + + if (isinstance(self, ImageClip) and (not hasattr(pos, "__call__")) + and ((self.mask is None) or isinstance(self.mask, ImageClip))): + new_result = result.to_ImageClip() + if result.mask is not None: + new_result.mask = result.mask.to_ImageClip() + return new_result.set_duration(result.duration) + + return result
+ +
+    @outplace
+    def set_make_frame(self, mf):
+        """Change the clip's ``make_frame``.
+
+        Returns a copy of the VideoClip instance, with the make_frame
+        attribute set to `mf`.
+        """
+        self.make_frame = mf
+        self.size = self.get_frame(0).shape[:2][::-1]
+ +
+    @outplace
+    def set_audio(self, audioclip):
+        """Attach an AudioClip to the VideoClip.
+
+        Returns a copy of the VideoClip instance, with the `audio`
+        attribute set to ``audioclip``, which must be an AudioClip instance.
+        """
+        self.audio = audioclip
+ +
[docs] @outplace + def set_mask(self, mask): + """Set the clip's mask. + + Returns a copy of the VideoClip with the mask attribute set to + ``mask``, which must be a greyscale (values in 0-1) VideoClip""" + assert mask is None or mask.ismask + self.mask = mask
+ +
[docs] @add_mask_if_none + @outplace + def set_opacity(self, op): + """Set the opacity/transparency level of the clip. + + Returns a semi-transparent copy of the clip where the mask is + multiplied by ``op`` (any float, normally between 0 and 1). + """ + self.mask = self.mask.fl_image(lambda pic: op * pic)
+ +
[docs] @apply_to_mask + @outplace + def set_position(self, pos, relative=False): + """Set the clip's position in compositions. + + Sets the position that the clip will have when included + in compositions. The argument ``pos`` can be either a couple + ``(x,y)`` or a function ``t-> (x,y)``. `x` and `y` mark the + location of the top left corner of the clip, and can be + of several types. + + Examples + ---------- + + >>> clip.set_position((45,150)) # x=45, y=150 + >>> + >>> # clip horizontally centered, at the top of the picture + >>> clip.set_position(("center","top")) + >>> + >>> # clip is at 40% of the width, 70% of the height: + >>> clip.set_position((0.4,0.7), relative=True) + >>> + >>> # clip's position is horizontally centered, and moving up ! + >>> clip.set_position(lambda t: ('center', 50+t) ) + + """ + self.relative_pos = relative + if hasattr(pos, '__call__'): + self.pos = pos + else: + self.pos = lambda t: pos
+ + # -------------------------------------------------------------- + # CONVERSIONS TO OTHER TYPES + +
[docs] @convert_to_seconds(['t']) + def to_ImageClip(self, t=0, with_mask=True, duration=None): + """ + Returns an ImageClip made out of the clip's frame at time ``t``, + which can be expressed in seconds (15.35), in (min, sec), + in (hour, min, sec), or as a string: '01:03:05.35'. + """ + newclip = ImageClip(self.get_frame(t), ismask=self.ismask, + duration=duration) + if with_mask and self.mask is not None: + newclip.mask = self.mask.to_ImageClip(t) + return newclip
+ +
+    def to_mask(self, canal=0):
+        """Return a mask video clip made from the clip."""
+        if self.ismask:
+            return self
+        else:
+            newclip = self.fl_image(lambda pic:
+                                    1.0 * pic[:, :, canal] / 255)
+            newclip.ismask = True
+            return newclip
+ +
[docs] def to_RGB(self): + """Return a non-mask video clip made from the mask video clip.""" + if self.ismask: + f = lambda pic: np.dstack(3 * [255 * pic]).astype('uint8') + newclip = self.fl_image(f) + newclip.ismask = False + return newclip + else: + return self
+ + # ---------------------------------------------------------------- + # Audio + +
[docs] @outplace + def without_audio(self): + """Remove the clip's audio. + + Return a copy of the clip with audio set to None. + + """ + self.audio = None
+ +
[docs] @outplace + def afx(self, fun, *a, **k): + """Transform the clip's audio. + + Return a new clip whose audio has been transformed by ``fun``. + + """ + self.audio = self.audio.fx(fun, *a, **k)
+ + +class DataVideoClip(VideoClip): + """ + Class of video clips whose successive frames are functions + of successive datasets + + Parameters + ----------- + data + A liste of datasets, each dataset being used for one frame of the clip + + data_to_frame + A function d -> video frame, where d is one element of the list `data` + + fps + Number of frames per second in the animation + + Examples + --------- + """ + + def __init__(self, data, data_to_frame, fps, ismask=False, + has_constant_size=True): + self.data = data + self.data_to_frame = data_to_frame + self.fps = fps + make_frame = lambda t: self.data_to_frame(self.data[int(self.fps*t)]) + VideoClip.__init__(self, make_frame, ismask=ismask, + duration=1.0*len(data)/fps, + has_constant_size=has_constant_size) + + +class UpdatedVideoClip(VideoClip): + """ + Class of clips whose make_frame requires some objects to + be updated. Particularly practical in science where some + algorithm needs to make some steps before a new frame can + be generated. + + UpdatedVideoClips have the following make_frame: + + >>> def make_frame(t): + >>> while self.world.clip_t < t: + >>> world.update() # updates, and increases world.clip_t + >>> return world.to_frame() + + Parameters + ----------- + + world + An object with the following attributes: + - world.clip_t : the clip's time corresponding to the + world's state + - world.update() : update the world's state, (including + increasing world.clip_t of one time step) + - world.to_frame() : renders a frame depending on the world's state + + ismask + True if the clip is a WxH mask with values in 0-1 + + duration + Duration of the clip, in seconds + + """ + + def __init__(self, world, ismask=False, duration=None): + self.world = world + + def make_frame(t): + while self.world.clip_t < t: + world.update() + return world.to_frame() + + VideoClip.__init__(self, make_frame=make_frame, + ismask=ismask, duration=duration) + + +"""--------------------------------------------------------------------- + + ImageClip (base class for all 'static clips') and its subclasses + ColorClip and TextClip. + I would have liked to put these in a separate file but Python is bad + at cyclic imports. + +---------------------------------------------------------------------""" + + +
+class ImageClip(VideoClip):
+    """Class for non-moving VideoClips.
+
+    A video clip originating from a picture. This clip will simply
+    display the given picture at all times.
+
+    Examples
+    ---------
+
+    >>> clip = ImageClip("myHouse.jpeg")
+    >>> clip = ImageClip( someArray )  # a Numpy array representing an RGB image
+
+    Parameters
+    -----------
+
+    img
+      Any picture file (png, tiff, jpeg, etc.) or any array representing
+      an RGB image (for instance a frame from a VideoClip).
+
+    ismask
+      Set this parameter to `True` if the clip is a mask.
+
+    transparent
+      Set this parameter to `True` (default) if you want the alpha layer
+      of the picture (if it exists) to be used as a mask.
+
+    Attributes
+    -----------
+
+    img
+      Array representing the image of the clip.
+
+    """
+
+    def __init__(self, img, ismask=False, transparent=True,
+                 fromalpha=False, duration=None):
+        VideoClip.__init__(self, ismask=ismask, duration=duration)
+
+        if isinstance(img, string_types):
+            img = imread(img)
+
+        if len(img.shape) == 3:  # img is (now) a RGB(a) numpy array
+
+            if img.shape[2] == 4:
+                if fromalpha:
+                    img = 1.0 * img[:, :, 3] / 255
+                elif ismask:
+                    img = 1.0 * img[:, :, 0] / 255
+                elif transparent:
+                    self.mask = ImageClip(
+                        1.0 * img[:, :, 3] / 255, ismask=True)
+                    img = img[:, :, :3]
+            elif ismask:
+                img = 1.0 * img[:, :, 0] / 255
+
+        # if the image was just a 2D mask, it should arrive here
+        # unchanged
+        self.make_frame = lambda t: img
+        self.size = img.shape[:2][::-1]
+        self.img = img
+    def fl(self, fl, apply_to=None, keep_duration=True):
+        """General transformation filter.
+
+        Equivalent to VideoClip.fl. The result is no longer an
+        ImageClip, it has the class VideoClip (since it may be animated).
+        """
+        if apply_to is None:
+            apply_to = []
+        # When we use fl on an image clip it may become animated.
+        # Therefore the result is not an ImageClip, just a VideoClip.
+        newclip = VideoClip.fl(self, fl, apply_to=apply_to,
+                               keep_duration=keep_duration)
+        newclip.__class__ = VideoClip
+        return newclip
+ +
+    @outplace
+    def fl_image(self, image_func, apply_to=None):
+        """Image-transformation filter.
+
+        Does the same as VideoClip.fl_image, but for ImageClip the
+        transformed clip is computed once and for all at the beginning,
+        and not for each 'frame'.
+        """
+        if apply_to is None:
+            apply_to = []
+        arr = image_func(self.get_frame(0))
+        self.size = arr.shape[:2][::-1]
+        self.make_frame = lambda t: arr
+        self.img = arr
+
+        for attr in apply_to:
+            a = getattr(self, attr, None)
+            if a is not None:
+                new_a = a.fl_image(image_func)
+                setattr(self, attr, new_a)
+ +
[docs] @outplace + def fl_time(self, time_func, apply_to=None, + keep_duration=False): + """Time-transformation filter. + + Applies a transformation to the clip's timeline + (see Clip.fl_time). + + This method does nothing for ImageClips (but it may affect their + masks or their audios). The result is still an ImageClip. + """ + if apply_to is None: + apply_to = ['mask', 'audio'] + for attr in apply_to: + a = getattr(self, attr, None) + if a is not None: + new_a = a.fl_time(time_func) + setattr(self, attr, new_a)
+ + +# ## +# +# The old functions to_videofile, to_gif, to_images sequences have been +# replaced by the more explicite write_videofile, write_gif, etc. + +VideoClip.set_pos = deprecated_version_of(VideoClip.set_position, + 'set_pos') +VideoClip.to_videofile = deprecated_version_of(VideoClip.write_videofile, + 'to_videofile') +VideoClip.to_gif = deprecated_version_of(VideoClip.write_gif, 'to_gif') +VideoClip.to_images_sequence = deprecated_version_of(VideoClip.write_images_sequence, + 'to_images_sequence') + + +
+class ColorClip(ImageClip):
+    """An ImageClip showing just one color.
+
+    Parameters
+    -----------
+
+    size
+      Size (width, height) in pixels of the clip.
+
+    color
+      If argument ``ismask`` is False, ``color`` indicates
+      the color in RGB of the clip (default is black). If ``ismask``
+      is True, ``color`` must be a float between 0 and 1 (default is 1).
+
+    ismask
+      Set to true if the clip will be used as a mask.
+
+    col
+      Has been deprecated. Do not use.
+    """
+
+    def __init__(self, size, color=None, ismask=False, duration=None, col=None):
+        if col is not None:
+            warnings.warn("The `ColorClip` parameter `col` has been deprecated."
+                          " Please use `color` instead.", DeprecationWarning)
+            if color is not None:
+                warnings.warn("The arguments `color` and `col` have both been "
+                              "passed to `ColorClip` so `col` has been ignored.",
+                              UserWarning)
+            else:
+                color = col
+        w, h = size
+        shape = (h, w) if np.isscalar(color) else (h, w, len(color))
+        ImageClip.__init__(self, np.tile(color, w * h).reshape(shape),
+                           ismask=ismask, duration=duration)
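A construction sketch (editorial):

    from moviepy.video.VideoClip import ColorClip

    bg = ColorClip((640, 360), color=(255, 0, 0), duration=2)  # 2 s of solid red
    m = ColorClip((640, 360), color=0.5, ismask=True)  # uniform half-opacity mask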
+ + +
+class TextClip(ImageClip):
+    """Class for autogenerated text clips.
+
+    Creates an ImageClip originating from a script-generated text image.
+    Requires ImageMagick.
+
+    Parameters
+    -----------
+
+    txt
+      A string of the text to write. Can be replaced by argument
+      ``filename``.
+
+    filename
+      The name of a file in which there is the text to write.
+      Can be provided instead of argument ``txt``.
+
+    size
+      Size of the picture in pixels. Can be auto-set if
+      method='label', but mandatory if method='caption'.
+      The height can be None; it will then be auto-determined.
+
+    bg_color
+      Color of the background. See ``TextClip.list('color')``
+      for a list of acceptable names.
+
+    color
+      Color of the text. See ``TextClip.list('color')`` for a
+      list of acceptable names.
+
+    font
+      Name of the font to use. See ``TextClip.list('font')`` for
+      the list of fonts you can use on your computer.
+
+    stroke_color
+      Color of the stroke (=contour line) of the text. If ``None``,
+      there will be no stroke.
+
+    stroke_width
+      Width of the stroke, in pixels. Can be a float, like 1.5.
+
+    method
+      Either 'label' (default, the picture will be autosized so as to fit
+      exactly the size) or 'caption' (the text will be drawn in a picture
+      with fixed size provided with the ``size`` argument). If `caption`,
+      the text will be wrapped automagically (sometimes it is buggy, not
+      my fault, complain to the ImageMagick crew) and can be aligned or
+      centered (see parameter ``align``).
+
+    kerning
+      Changes the default spacing between letters. For
+      instance ``kerning=-1`` will make the letters 1 pixel nearer to
+      each other compared to the default spacing.
+
+    align
+      center | East | West | South | North . Will only work if ``method``
+      is set to ``caption``.
+
+    transparent
+      ``True`` (default) if you want to take into account the
+      transparency in the image.
+
+    """
+
+    def __init__(self, txt=None, filename=None, size=None, color='black',
+                 bg_color='transparent', fontsize=None, font='Courier',
+                 stroke_color=None, stroke_width=1, method='label',
+                 kerning=None, align='center', interline=None,
+                 tempfilename=None, temptxt=None,
+                 transparent=True, remove_temp=True,
+                 print_cmd=False):
+
+        if txt is not None:
+            if temptxt is None:
+                temptxt_fd, temptxt = tempfile.mkstemp(suffix='.txt')
+                try:  # only in Python3 will this work
+                    os.write(temptxt_fd, bytes(txt, 'UTF8'))
+                except TypeError:  # oops, fall back to Python2
+                    os.write(temptxt_fd, txt)
+                os.close(temptxt_fd)
+            txt = '@' + temptxt
+        else:
+            # Use a file instead of a text. '@' is ImageMagick's read-from-
+            # file prefix; the rendered page had "@%", which would point
+            # ImageMagick at a file named '%<filename>'.
+            txt = "@" + filename
+
+        if size is not None:
+            size = ('' if size[0] is None else str(size[0]),
+                    '' if size[1] is None else str(size[1]))
+
+        cmd = ([get_setting("IMAGEMAGICK_BINARY"),
+                "-background", bg_color,
+                "-fill", color,
+                "-font", font])
+
+        if fontsize is not None:
+            cmd += ["-pointsize", "%d" % fontsize]
+        if kerning is not None:
+            cmd += ["-kerning", "%0.1f" % kerning]
+        if stroke_color is not None:
+            cmd += ["-stroke", stroke_color, "-strokewidth",
+                    "%.01f" % stroke_width]
+        if size is not None:
+            cmd += ["-size", "%sx%s" % (size[0], size[1])]
+        if align is not None:
+            cmd += ["-gravity", align]
+        if interline is not None:
+            cmd += ["-interline-spacing", "%d" % interline]
+
+        if tempfilename is None:
+            tempfile_fd, tempfilename = tempfile.mkstemp(suffix='.png')
+            os.close(tempfile_fd)
+
+        cmd += ["%s:%s" % (method, txt),
+                "-type", "truecolormatte", "PNG32:%s" % tempfilename]
+
+        if print_cmd:
+            print(" ".join(cmd))
+
+        try:
+            subprocess_call(cmd, logger=None)
+        except (IOError, OSError) as err:
+            error = ("MoviePy Error: creation of %s failed because of the "
+                     "following error:\n\n%s.\n\n." % (filename, str(err)) +
+                     ("This error can be due to the fact that ImageMagick "
+                      "is not installed on your computer, or (for Windows "
+                      "users) that you didn't specify the path to the "
+                      "ImageMagick binary in file conf.py, or that the path "
+                      "you specified is incorrect"))
+            raise IOError(error)
+
+        ImageClip.__init__(self, tempfilename, transparent=transparent)
+        self.txt = txt
+        self.color = color
+        self.stroke_color = stroke_color
+
+        if remove_temp:
+            if os.path.exists(tempfilename):
+                os.remove(tempfilename)
+            if os.path.exists(temptxt):
+                os.remove(temptxt)
[docs]    @staticmethod
+    def list(arg):
+        """Returns the list of all valid entries for the argument of
+        ``TextClip`` given (can be ``font``, ``color``, etc...)"""
+
+        popen_params = {"stdout": sp.PIPE,
+                        "stderr": DEVNULL,
+                        "stdin": DEVNULL}
+
+        if os.name == "nt":
+            popen_params["creationflags"] = 0x08000000
+
+        process = sp.Popen([get_setting("IMAGEMAGICK_BINARY"),
+                            '-list', arg], **popen_params)
+        result = process.communicate()[0]
+        lines = result.splitlines()
+
+        if arg == 'font':
+            return [l.decode('UTF-8')[8:] for l in lines if l.startswith(b"  Font:")]
+        elif arg == 'color':
+            return [l.split(b" ")[0] for l in lines[2:]]
+        else:
+            raise Exception("Moviepy:Error! Argument must equal "
+                            "'font' or 'color'")
+ +
[docs]    @staticmethod
+    def search(string, arg):
+        """Returns the list of all valid entries which contain ``string`` for
+        the argument ``arg`` of ``TextClip``, for instance
+
+        >>> # Find all the available fonts which contain "Courier"
+        >>> print ( TextClip.search('Courier', 'font') )
+
+        """
+        string = string.lower()
+        names_list = TextClip.list(arg)
+        return [name for name in names_list if string in name.lower()]
+
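A minimal usage sketch (requires a working ImageMagick install; the font name may differ on your system):

    from moviepy.editor import TextClip

    title = (TextClip("Hello world", fontsize=70, color='white',
                      font='Courier', method='label')
             .set_duration(3))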
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/compositing/CompositeVideoClip.html b/v1.0.3/_modules/moviepy/video/compositing/CompositeVideoClip.html
new file mode 100644
index 000000000..fb340633e
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/compositing/CompositeVideoClip.html
@@ -0,0 +1,386 @@

Source code for moviepy.video.compositing.CompositeVideoClip

+import numpy as np
+
+from moviepy.audio.AudioClip import CompositeAudioClip
+from moviepy.video.VideoClip import ColorClip, VideoClip
+
+#  CompositeVideoClip
+
+
[docs]class CompositeVideoClip(VideoClip):
+
+    """
+    A VideoClip made of other videoclips displayed together. This is the
+    base class for most compositions.
+
+    Parameters
+    ----------
+
+    size
+      The size (width, height) of the final clip.
+
+    clips
+      A list of videoclips. Each clip of the list will
+      be displayed below the clips appearing after it in the list.
+      For each clip:
+
+      - The attribute ``pos`` determines where the clip is placed.
+        See ``VideoClip.set_pos``
+      - The mask of the clip determines which parts are visible.
+
+      Finally, if all the clips in the list have their ``duration``
+      attribute set, then the duration of the composite video clip
+      is computed automatically.
+
+    bg_color
+      Color for the unmasked and unfilled regions. Set to None for these
+      regions to be transparent (will be slower).
+
+    use_bgclip
+      Set to True if the first clip in the list should be used as the
+      'background' on which all other clips are blitted. That first clip must
+      have the same size as the final clip. If it has no transparency, the final
+      clip will have no mask.
+
+    The clip with the highest FPS will be the FPS of the composite clip.
+
+    """
+
+    def __init__(self, clips, size=None, bg_color=None, use_bgclip=False,
+                 ismask=False):
+
+        if size is None:
+            size = clips[0].size
+
+        if use_bgclip and (clips[0].mask is None):
+            transparent = False
+        else:
+            transparent = (bg_color is None)
+
+        if bg_color is None:
+            bg_color = 0.0 if ismask else (0, 0, 0)
+
+        fpss = [c.fps for c in clips if getattr(c, 'fps', None)]
+        self.fps = max(fpss) if fpss else None
+
+        VideoClip.__init__(self)
+
+        self.size = size
+        self.ismask = ismask
+        self.clips = clips
+        self.bg_color = bg_color
+
+        if use_bgclip:
+            self.bg = clips[0]
+            self.clips = clips[1:]
+            self.created_bg = False
+        else:
+            self.clips = clips
+            self.bg = ColorClip(size, color=self.bg_color)
+            self.created_bg = True
+
+        # compute duration
+        ends = [c.end for c in self.clips]
+        if None not in ends:
+            duration = max(ends)
+            self.duration = duration
+            self.end = duration
+
+        # compute audio
+        audioclips = [v.audio for v in self.clips if v.audio is not None]
+        if audioclips:
+            self.audio = CompositeAudioClip(audioclips)
+
+        # compute mask if necessary
+        if transparent:
+            maskclips = [(c.mask if (c.mask is not None) else
+                          c.add_mask().mask).set_position(c.pos)
+                         .set_end(c.end).set_start(c.start, change_end=False)
+                         for c in self.clips]
+
+            self.mask = CompositeVideoClip(maskclips, self.size, ismask=True,
+                                           bg_color=0.0)
+
+        def make_frame(t):
+            """The clips playing at time `t` are blitted over one
+            another."""
+            f = self.bg.get_frame(t)
+            for c in self.playing_clips(t):
+                f = c.blit_on(f, t)
+            return f
+
+        self.make_frame = make_frame
+
[docs]    def playing_clips(self, t=0):
+        """Returns a list of the clips in the composite clips that are
+        actually playing at the given time `t`."""
+        return [c for c in self.clips if c.is_playing(t)]
+ +
[docs]    def close(self):
+        if self.created_bg and self.bg:
+            # Only close the background clip if it was locally created.
+            # Otherwise, it remains the job of whoever created it.
+            self.bg.close()
+            self.bg = None
+        if hasattr(self, "audio") and self.audio:
+            self.audio.close()
+            self.audio = None
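A minimal usage sketch (the input file name is hypothetical):

    from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input file
    title = (TextClip("Hello", fontsize=70, color='white')
             .set_position('center')
             .set_duration(clip.duration))
    # The first clip in the list is the bottom layer
    final = CompositeVideoClip([clip, title])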
+
+
+def clips_array(array, rows_widths=None, cols_widths=None,
+                bg_color=None):
+    """
+
+    rows_widths
+      heights of the different rows in pixels. If None, is set automatically.
+
+    cols_widths
+      widths of the different columns in pixels. If None, is set automatically.
+
+    bg_color
+      Fill color for the masked and unfilled regions. Set to None for these
+      regions to be transparent (will be slower).
+
+    """
+
+    array = np.array(array)
+    sizes_array = np.array([[c.size for c in line] for line in array])
+
+    # find row heights and column widths automatically if not provided
+    if rows_widths is None:
+        rows_widths = sizes_array[:,:,1].max(axis=1)
+    if cols_widths is None:
+        cols_widths = sizes_array[:,:,0].max(axis=0)
+
+    xx = np.cumsum([0]+list(cols_widths))
+    yy = np.cumsum([0]+list(rows_widths))
+
+    for j, (x, cw) in enumerate(zip(xx[:-1], cols_widths)):
+        for i, (y, rw) in enumerate(zip(yy[:-1], rows_widths)):
+            clip = array[i, j]
+            w, h = clip.size
+            # center any clip smaller than its cell on a background
+            if (w < cw) or (h < rw):
+                clip = (CompositeVideoClip([clip.set_position('center')],
+                                           size=(cw, rw),
+                                           bg_color=bg_color).
+                        set_duration(clip.duration))
+
+            array[i, j] = clip.set_position((x, y))
+
+    return CompositeVideoClip(array.flatten(), size=(xx[-1], yy[-1]), bg_color=bg_color)
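A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, clips_array

    clip = VideoFileClip("myvideo.mp4").margin(10)  # hypothetical input
    grid = clips_array([[clip, clip],
                        [clip, clip]])  # 2x2 grid of the same clip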
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/accel_decel.html b/v1.0.3/_modules/moviepy/video/fx/accel_decel.html
new file mode 100644
index 000000000..226fefdac
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/accel_decel.html
@@ -0,0 +1,254 @@

Source code for moviepy.video.fx.accel_decel

+def f_accel_decel(t, old_d, new_d, abruptness=1, soonness=1.0):
+    """
+    abruptness
+      negative abruptness (>-1): speed up down up
+      zero abruptness : no effect
+      positive abruptness: speed down up down
+      
+    soonness
+      for positive abruptness, determines how soon the
+      speedup occurs (0<soonness < inf)
+    """
+    
+    a = 1.0+abruptness
+    def _f(t):
+        f1 = lambda t: (0.5)**(1-a)*(t**a)
+        f2 = lambda t: (1-f1(1-t))
+        return (t<.5)*f1(t) + (t>=.5)*f2(t) 
+    
+    return old_d*_f((t/new_d)**soonness)
+
+
+
[docs]def accel_decel(clip, new_duration=None, abruptness=1.0, soonness=1.0):
+    """
+
+    new_duration
+      If None, will be that of the current clip.
+
+    abruptness
+      negative abruptness (>-1): speed up, down, up
+      zero abruptness: no effect
+      positive abruptness: speed down, up, down
+
+    soonness
+      for positive abruptness, determines how soon the
+      speedup occurs (0 < soonness < inf)
+    """
+
+    if new_duration is None:
+        new_duration = clip.duration
+
+    fl = lambda t: f_accel_decel(t, clip.duration, new_duration,
+                                 abruptness, soonness)
+
+    return clip.fl_time(fl).set_duration(new_duration)
+
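A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip
    from moviepy.video.fx.accel_decel import accel_decel

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    # Same total duration, but the motion eases in and out
    eased = accel_decel(clip, new_duration=clip.duration, abruptness=1.0)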
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/blackwhite.html b/v1.0.3/_modules/moviepy/video/fx/blackwhite.html
new file mode 100644
index 000000000..a28dfbaa5
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/blackwhite.html
@@ -0,0 +1,232 @@

Source code for moviepy.video.fx.blackwhite

+import numpy as np
+
+
+
[docs]def blackwhite(clip, RGB=None, preserve_luminosity=True):
+    """Desaturates the picture, making it black and white.
+    Parameter RGB allows you to set weights for the different color
+    channels.
+    If RGB is 'CRT_phosphor' a special set of values is used.
+    preserve_luminosity normalizes the weights so that they sum to 1."""
+    if RGB is None:
+        RGB = [1, 1, 1]
+
+    if RGB == 'CRT_phosphor':
+        RGB = [0.2125, 0.7154, 0.0721]
+
+    R, G, B = 1.0 * np.array(RGB) / (sum(RGB) if preserve_luminosity else 1)
+
+    def fl(im):
+        # weighted average of the three channels, replicated on each channel
+        im = (R * im[:, :, 0] + G * im[:, :, 1] + B * im[:, :, 2])
+        return np.dstack(3 * [im]).astype('uint8')
+
+    return clip.fl_image(fl)
+
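A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    bw = clip.fx(vfx.blackwhite)                       # equal channel weights
    crt = clip.fx(vfx.blackwhite, RGB='CRT_phosphor')  # luminosity weights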
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/blink.html b/v1.0.3/_modules/moviepy/video/fx/blink.html
new file mode 100644
index 000000000..859936fa3
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/blink.html
@@ -0,0 +1,225 @@

Source code for moviepy.video.fx.blink

+import copy
+
+
+
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/colorx.html b/v1.0.3/_modules/moviepy/video/fx/colorx.html
new file mode 100644
index 000000000..bd59702cd
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/colorx.html
@@ -0,0 +1,220 @@

Source code for moviepy.video.fx.colorx

+import numpy as np
+
+
+
[docs]def colorx(clip, factor):
+    """Multiplies the clip's colors by the given factor; can be used
+    to decrease or increase the clip's brightness (values are clipped
+    at 255).
+    """
+    return clip.fl_image(lambda pic: np.minimum(255, (factor * pic)).
+                         astype('uint8'))
+
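A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    darker = clip.fx(vfx.colorx, 0.5)    # halve the pixel values
    brighter = clip.fx(vfx.colorx, 1.5)  # results above 255 are clipped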
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/crop.html b/v1.0.3/_modules/moviepy/video/fx/crop.html
new file mode 100644
index 000000000..fd96bc137
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/crop.html
@@ -0,0 +1,263 @@

Source code for moviepy.video.fx.crop

+
[docs]def crop(clip, x1=None, y1=None, x2=None, y2=None, width=None, height=None, x_center=None, y_center=None):
+    """
+    Returns a new clip in which just a rectangular subregion of the
+    original clip is conserved. x1,y1 indicates the top left corner and
+    x2,y2 is the lower right corner of the cropped region.
+    All coordinates are in pixels. Float numbers are accepted.
+
+    To crop an arbitrary rectangle:
+
+    >>> crop(clip, x1=50, y1=60, x2=460, y2=275)
+
+    Only remove the part above y=30:
+
+    >>> crop(clip, y1=30)
+
+    Crop a rectangle that starts 10 pixels from the left and is 200px wide:
+
+    >>> crop(clip, x1=10, width=200)
+
+    Crop a rectangle centered in x,y=(300,400), width=50, height=150:
+
+    >>> crop(clip, x_center=300, y_center=400,
+             width=50, height=150)
+
+    Any combination of the above should work, like for this rectangle
+    centered in x=300, with explicit y-boundaries:
+
+    >>> crop(clip, x_center=300, width=400, y1=100, y2=600)
+
+    """
+
+    if width and x1 is not None:
+        x2 = x1 + width
+    elif width and x2 is not None:
+        x1 = x2 - width
+
+    if height and y1 is not None:
+        y2 = y1 + height
+    elif height and y2 is not None:
+        y1 = y2 - height
+
+    if x_center:
+        x1, x2 = x_center - width / 2, x_center + width / 2
+
+    if y_center:
+        y1, y2 = y_center - height / 2, y_center + height / 2
+
+    x1 = x1 or 0
+    y1 = y1 or 0
+    x2 = x2 or clip.size[0]
+    y2 = y2 or clip.size[1]
+
+    return clip.fl_image(lambda pic: pic[int(y1):int(y2), int(x1):int(x2)], apply_to=["mask"])
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/even_size.html b/v1.0.3/_modules/moviepy/video/fx/even_size.html
new file mode 100644
index 000000000..70298bf3a
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/even_size.html
@@ -0,0 +1,232 @@

Source code for moviepy.video.fx.even_size

+from moviepy.decorators import apply_to_mask
+
+
+
[docs]@apply_to_mask
+def even_size(clip):
+    """
+    Crops the clip to make its dimensions even.
+    """
+    w, h = clip.size
+    w_even = w % 2 == 0
+    h_even = h % 2 == 0
+    if w_even and h_even:
+        return clip
+
+    if not w_even and not h_even:
+        fl_image = lambda a: a[:-1, :-1, :]  # drop one row and one column
+    elif h_even:
+        fl_image = lambda a: a[:, :-1, :]    # only the width is odd
+    else:
+        fl_image = lambda a: a[:-1, :, :]    # only the height is odd
+
+    return clip.fl_image(fl_image)
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/fadein.html b/v1.0.3/_modules/moviepy/video/fx/fadein.html
new file mode 100644
index 000000000..cfc77022c
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/fadein.html
@@ -0,0 +1,235 @@

Source code for moviepy.video.fx.fadein

+import numpy as np
+
+
+
[docs]def fadein(clip, duration, initial_color=None):
+    """
+    Makes the clip progressively appear from some color (black by default),
+    over ``duration`` seconds at the beginning of the clip. Can be used for
+    masks too, where the initial color must be a number between 0 and 1.
+    For cross-fading (progressive appearance or disappearance of a clip
+    over another clip), see ``composition.crossfade``.
+    """
+
+    if initial_color is None:
+        initial_color = 0 if clip.ismask else [0, 0, 0]
+
+    initial_color = np.array(initial_color)
+
+    def fl(gf, t):
+        if t >= duration:
+            return gf(t)
+        else:
+            # linear blend between the initial color and the real frame
+            fading = (1.0 * t / duration)
+            return fading * gf(t) + (1 - fading) * initial_color
+
+    return clip.fl(fl)
+
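A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    faded = clip.fx(vfx.fadein, 2).fx(vfx.fadeout, 2)  # 2 s from/to black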
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/fadeout.html b/v1.0.3/_modules/moviepy/video/fx/fadeout.html
new file mode 100644
index 000000000..3b9102353
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/fadeout.html
@@ -0,0 +1,238 @@

Source code for moviepy.video.fx.fadeout

+import numpy as np
+
+from moviepy.decorators import requires_duration
+
+
+
[docs]@requires_duration
+def fadeout(clip, duration, final_color=None):
+    """
+    Makes the clip progressively fade to some color (black by default),
+    over ``duration`` seconds at the end of the clip. Can be used for
+    masks too, where the final color must be a number between 0 and 1.
+    For cross-fading (progressive appearance or disappearance of a clip
+    over another clip), see ``composition.crossfade``.
+    """
+
+    if final_color is None:
+        final_color = 0 if clip.ismask else [0, 0, 0]
+
+    final_color = np.array(final_color)
+
+    def fl(gf, t):
+        if (clip.duration - t) >= duration:
+            return gf(t)
+        else:
+            fading = 1.0 * (clip.duration - t) / duration
+            return fading * gf(t) + (1 - fading) * final_color
+
+    return clip.fl(fl)
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/freeze.html b/v1.0.3/_modules/moviepy/video/fx/freeze.html
new file mode 100644
index 000000000..e52d41fec
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/freeze.html
@@ -0,0 +1,239 @@

Source code for moviepy.video.fx.freeze

+from moviepy.decorators import requires_duration
+from moviepy.video.compositing.concatenate import concatenate_videoclips
+from moviepy.video.VideoClip import ImageClip
+
+
+
[docs]@requires_duration
+def freeze(clip, t=0, freeze_duration=None, total_duration=None,
+           padding_end=0):
+    """Momentarily freeze the clip at time t.
+
+    Set `t='end'` to freeze the clip at the end (actually it will freeze on the
+    frame at time clip.duration - padding_end seconds).
+    With ``freeze_duration`` you can specify the duration of the freeze.
+    With ``total_duration`` you can specify the total duration of
+    the clip and the freeze (i.e. the duration of the freeze is
+    automatically calculated). One of them must be provided.
+    """
+
+    if t == 'end':
+        t = clip.duration - padding_end
+
+    if freeze_duration is None:
+        freeze_duration = total_duration - clip.duration
+
+    before = [clip.subclip(0, t)] if (t != 0) else []
+    freeze = [clip.to_ImageClip(t).set_duration(freeze_duration)]
+    after = [clip.subclip(t)] if (t != clip.duration) else []
+    return concatenate_videoclips(before + freeze + after)
+ +
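A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    # Hold the frame at t=1.5 s for 3 extra seconds
    frozen = clip.fx(vfx.freeze, t=1.5, freeze_duration=3)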
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/freeze_region.html b/v1.0.3/_modules/moviepy/video/fx/freeze_region.html
new file mode 100644
index 000000000..427ba8b97
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/freeze_region.html
@@ -0,0 +1,267 @@

Source code for moviepy.video.fx.freeze_region

+from moviepy.decorators import apply_to_mask
+from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
+
+from .crop import crop
+
+
+#@apply_to_mask
+
[docs]def freeze_region(clip, t=0, region=None, outside_region=None, mask=None):
+    """Freezes one region of the clip while the rest remains animated.
+
+    You can choose one of three methods by providing either `region`,
+    `outside_region`, or `mask`.
+
+    Parameters
+    -----------
+
+    t
+      Time at which to freeze the frozen region.
+
+    region
+      A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
+      which will be frozen. You can provide outside_region or mask instead.
+
+    outside_region
+      A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
+      which will be the only non-frozen region.
+
+    mask
+      If not None, will overlay a frozen version of the clip on the current clip,
+      with the provided mask. In other words, the "visible" pixels in the mask
+      indicate the frozen region in the final picture.
+
+    """
+
+    if region is not None:
+
+        x1, y1, x2, y2 = region
+        freeze = (clip.fx(crop, *region)
+                  .to_ImageClip(t=t)
+                  .set_duration(clip.duration)
+                  .set_position((x1, y1)))
+        return CompositeVideoClip([clip, freeze])
+
+    elif outside_region is not None:
+
+        x1, y1, x2, y2 = outside_region
+        animated_region = (clip.fx(crop, *outside_region)
+                           .set_position((x1, y1)))
+        freeze = (clip.to_ImageClip(t=t)
+                  .set_duration(clip.duration))
+        return CompositeVideoClip([freeze, animated_region])
+
+    elif mask is not None:
+        freeze = (clip.to_ImageClip(t=t)
+                  .set_duration(clip.duration)
+                  .set_mask(mask))
+        return CompositeVideoClip([clip, freeze])
+
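A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    # Freeze the 100x100 top-left corner at t=1 s; the rest keeps playing
    partial = clip.fx(vfx.freeze_region, t=1, region=(0, 0, 100, 100))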
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/gamma_corr.html b/v1.0.3/_modules/moviepy/video/fx/gamma_corr.html
new file mode 100644
index 000000000..433216e85
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/gamma_corr.html
@@ -0,0 +1,218 @@

Source code for moviepy.video.fx.gamma_corr

+
+
[docs]def gamma_corr(clip, gamma):
+    """Gamma-correction of a video clip."""
+    def fl(im):
+        # normalize to [0,1], apply the power law, then rescale to [0,255]
+        corrected = (255 * (1.0 * im / 255) ** gamma)
+        return corrected.astype('uint8')
+
+    return clip.fl_image(fl)
+
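A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    brighter = clip.fx(vfx.gamma_corr, 0.5)  # gamma < 1 brightens midtones
    darker = clip.fx(vfx.gamma_corr, 2.0)    # gamma > 1 darkens midtones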
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/headblur.html b/v1.0.3/_modules/moviepy/video/fx/headblur.html
new file mode 100644
index 000000000..2bfc6a3bf
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/headblur.html
@@ -0,0 +1,268 @@

Source code for moviepy.video.fx.headblur

+import numpy as np
+
+#------- CHECKING DEPENDENCIES ----------------------------------------- 
+try:
+    import cv2
+    headblur_possible = True
+    if cv2.__version__ >= '3.0.0':
+       cv2.CV_AA=cv2.LINE_AA
+except:
+    headblur_possible = False
+#-----------------------------------------------------------------------
+
+
+def headblur(clip,fx,fy,r_zone,r_blur=None):
+    """
+    Returns a filter that will blur a moving part (a head?) of
+    the frames. The position of the blur at time t is
+    defined by (fx(t), fy(t)), the radius of the blurring
+    by ``r_zone`` and the intensity of the blurring by ``r_blur``.
+    Requires OpenCV for the circling and the blurring.
+    Automatically deals with the case where part of the image goes
+    offscreen.
+    """
+    
+    if r_blur is None: r_blur = 2*r_zone/3
+    
+    def fl(gf,t):
+        
+        im = gf(t)
+        h,w,d = im.shape
+        x,y = int(fx(t)),int(fy(t))
+        x1,x2 = max(0,x-r_zone),min(x+r_zone,w)
+        y1,y2 = max(0,y-r_zone),min(y+r_zone,h)
+        region_size = y2-y1,x2-x1
+        
+        mask = np.zeros(region_size).astype('uint8')
+        cv2.circle(mask, (r_zone,r_zone), r_zone, 255, -1,
+                   lineType=cv2.CV_AA)
+                               
+        mask = np.dstack(3*[(1.0/255)*mask])
+        
+        orig = im[y1:y2, x1:x2]
+        blurred = cv2.blur(orig,(r_blur, r_blur))
+        im[y1:y2, x1:x2] = mask*blurred + (1-mask)*orig
+        return im
+    
+    return clip.fl(fl)
+
+
+
+#------- OVERWRITE IF REQUIREMENTS NOT MET -----------------------------
+if not headblur_possible:
+    doc = headblur.__doc__
+
[docs]    def headblur(clip, fx, fy, r_zone, r_blur=None):
+        raise IOError("fx headblur needs opencv")
+
+    headblur.__doc__ = doc
+#-----------------------------------------------------------------------
+
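A minimal usage sketch (input file name hypothetical; requires OpenCV):

    from moviepy.editor import VideoFileClip
    from moviepy.video.fx.headblur import headblur

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    # Blur a 40-px-radius zone that drifts to the right over time
    blurred = headblur(clip, lambda t: 100 + 20 * t, lambda t: 150, 40)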
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/invert_colors.html b/v1.0.3/_modules/moviepy/video/fx/invert_colors.html
new file mode 100644
index 000000000..374f57b06
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/invert_colors.html
@@ -0,0 +1,218 @@

Source code for moviepy.video.fx.invert_colors

+
[docs]def invert_colors(clip):
+    """Returns the color-inverted clip.
+
+    The values of all pixels are replaced with (255-v), or (1-v) for masks.
+    Black becomes white, green becomes purple, etc.
+    """
+    maxi = (1.0 if clip.ismask else 255)
+    return clip.fl_image(lambda f: maxi - f)
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/loop.html b/v1.0.3/_modules/moviepy/video/fx/loop.html
new file mode 100644
index 000000000..453a62062
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/loop.html
@@ -0,0 +1,236 @@

Source code for moviepy.video.fx.loop

+from moviepy.decorators import apply_to_audio, apply_to_mask, requires_duration
+
+
+
[docs]@requires_duration
+@apply_to_mask
+@apply_to_audio
+def loop(self, n=None, duration=None):
+    """
+    Returns a clip that plays the current clip in an infinite loop.
+    Ideal for clips coming from gifs.
+
+    Parameters
+    ------------
+    n
+      Number of times the clip should be played. If `None` the
+      clip will loop indefinitely (i.e. with no set duration).
+
+    duration
+      Total duration of the clip. Can be specified instead of n.
+    """
+    result = self.fl_time(lambda t: t % self.duration)
+    if n:
+        duration = n * self.duration
+    if duration:
+        result = result.set_duration(duration)
+    return result
+
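A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    three_times = clip.fx(vfx.loop, n=3)          # play three times in a row
    ten_seconds = clip.fx(vfx.loop, duration=10)  # loop until t=10 s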
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/lum_contrast.html b/v1.0.3/_modules/moviepy/video/fx/lum_contrast.html
new file mode 100644
index 000000000..026c4c140
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/lum_contrast.html
@@ -0,0 +1,221 @@

Source code for moviepy.video.fx.lum_contrast

+
[docs]def lum_contrast(clip, lum=0, contrast=0, contrast_thr=127):
+    """Luminosity-contrast correction of a clip."""
+
+    def fl_image(im):
+        im = 1.0 * im  # float conversion
+        # shift by `lum`, then stretch values away from `contrast_thr`
+        corrected = im + lum + contrast * (im - float(contrast_thr))
+        corrected[corrected < 0] = 0
+        corrected[corrected > 255] = 255
+        return corrected.astype('uint8')
+
+    return clip.fl_image(fl_image)
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/make_loopable.html b/v1.0.3/_modules/moviepy/video/fx/make_loopable.html
new file mode 100644
index 000000000..a3c4ff9f3
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/make_loopable.html
@@ -0,0 +1,224 @@

Source code for moviepy.video.fx.make_loopable

+import moviepy.video.compositing.transitions as transfx
+from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
+
+
+
[docs]def make_loopable(clip, cross):
+    """
+    Makes the clip fade in progressively at its own end, so that
+    it can be looped indefinitely. ``cross`` is the duration in seconds
+    of the fade-in."""
+    d = clip.duration
+    clip2 = clip.fx(transfx.crossfadein, cross).\
+        set_start(d - cross)
+    return CompositeVideoClip([clip, clip2]).\
+        subclip(cross, d)
+
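A minimal usage sketch (file names hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    loopable = clip.fx(vfx.make_loopable, 1)  # 1-second crossfade
    loopable.write_gif("loop.gif")            # hypothetical output path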
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/margin.html b/v1.0.3/_modules/moviepy/video/fx/margin.html
new file mode 100644
index 000000000..2986bf74d
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/margin.html
@@ -0,0 +1,268 @@

Source code for moviepy.video.fx.margin

+import numpy as np
+
+from moviepy.decorators import apply_to_mask
+from moviepy.video.VideoClip import ImageClip
+
+
+
[docs]@apply_to_mask
+def margin(clip, mar=None, left=0, right=0, top=0,
+           bottom=0, color=(0, 0, 0), opacity=1.0):
+    """
+    Draws an external margin all around the frame.
+
+    :param mar: if not ``None``, then the new clip has a margin of
+        size ``mar`` in pixels on the left, right, top, and bottom.
+
+    :param left, right, top, bottom: width of the margin in pixels
+        in these directions.
+
+    :param color: color of the margin.
+
+    :param opacity: value of the mask on the margin. Setting
+        this value to 0 yields transparent margins.
+
+    """
+
+    if (opacity != 1.0) and (clip.mask is None) and not (clip.ismask):
+        clip = clip.add_mask()
+
+    if mar is not None:
+        left = right = top = bottom = mar
+
+    def make_bg(w, h):
+        new_w, new_h = w + left + right, h + top + bottom
+        if clip.ismask:
+            shape = (new_h, new_w)
+            bg = (np.tile(opacity, (new_h, new_w))
+                  .astype(float)
+                  .reshape(shape))
+        else:
+            shape = (new_h, new_w, 3)
+            bg = np.tile(color, (new_h, new_w)).reshape(shape)
+        return bg
+
+    if isinstance(clip, ImageClip):
+        im = make_bg(clip.w, clip.h)
+        im[top:top + clip.h, left:left + clip.w] = clip.img
+        return clip.fl_image(lambda pic: im)
+
+    else:
+        def fl(gf, t):
+            pic = gf(t)
+            h, w = pic.shape[:2]
+            im = make_bg(w, h)
+            im[top:top + h, left:left + w] = pic
+            return im
+        return clip.fl(fl)
+
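A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    framed = clip.fx(vfx.margin, mar=20, color=(255, 255, 255))  # 20-px white border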
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/mask_and.html b/v1.0.3/_modules/moviepy/video/fx/mask_and.html
new file mode 100644
index 000000000..07f83b382
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/mask_and.html
@@ -0,0 +1,230 @@

Source code for moviepy.video.fx.mask_and

+import numpy as np
+
+from ..VideoClip import ImageClip
+
+
+
[docs]def mask_and(clip, other_clip):
+    """Returns the logical 'and' (min) between two masks.
+    other_clip can be a mask clip or a picture (np.array).
+    The result has the duration of 'clip' (if it has any).
+    """
+
+    # To ensure that the 'and' of two ImageClips will be an ImageClip.
+    if isinstance(other_clip, ImageClip):
+        other_clip = other_clip.img
+
+    if isinstance(other_clip, np.ndarray):
+        return clip.fl_image(lambda f: np.minimum(f, other_clip))
+    else:
+        return clip.fl(lambda gf, t: np.minimum(gf(t),
+                                                other_clip.get_frame(t)))
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/mask_color.html b/v1.0.3/_modules/moviepy/video/fx/mask_color.html
new file mode 100644
index 000000000..a57fa0d91
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/mask_color.html
@@ -0,0 +1,244 @@

Source code for moviepy.video.fx.mask_color

+import numpy as np
+
+
+
[docs]def mask_color(clip, color=None, thr=0, s=1):
+    """Returns a new clip with a mask for transparency where the original
+    clip is of the given color.
+
+    You can also have a "progressive" mask by specifying a non-null distance
+    threshold thr. In this case, if the distance between a pixel and the given
+    color is d, the transparency will be
+
+    d**s / (thr**s + d**s)
+
+    which is 1 when d>>thr and 0 for d<<thr, the stiffness of the effect being
+    parametrized by s.
+    """
+    if color is None:
+        color = [0, 0, 0]
+
+    color = np.array(color)
+
+    def hill(x):
+        if thr:
+            return x ** s / (thr ** s + x ** s)
+        else:
+            return 1.0 * (x != 0)
+
+    def flim(im):
+        # Euclidean distance of each pixel to the target color
+        return hill(np.sqrt(((im - color) ** 2).sum(axis=2)))
+
+    mask = clip.fl_image(flim)
+    mask.ismask = True
+    newclip = clip.set_mask(mask)
+    return newclip
+
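A minimal green-screen keying sketch (file names hypothetical; assumes both clips share a size):

    from moviepy.editor import VideoFileClip, CompositeVideoClip, vfx

    fg = VideoFileClip("greenscreen.mp4")  # hypothetical foreground footage
    bg = VideoFileClip("landscape.mp4")    # hypothetical background
    keyed = fg.fx(vfx.mask_color, color=[0, 255, 0], thr=100, s=5)
    final = CompositeVideoClip([bg, keyed])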
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/mask_or.html b/v1.0.3/_modules/moviepy/video/fx/mask_or.html
new file mode 100644
index 000000000..b4e8b977c
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/mask_or.html
@@ -0,0 +1,230 @@

Source code for moviepy.video.fx.mask_or

+import numpy as np
+
+from ..VideoClip import ImageClip
+
+
+
[docs]def mask_or(clip, other_clip):
+    """Returns the logical 'or' (max) between two masks.
+    other_clip can be a mask clip or a picture (np.array).
+    The result has the duration of 'clip' (if it has any).
+    """
+
+    # To ensure that the 'or' of two ImageClips will be an ImageClip.
+    if isinstance(other_clip, ImageClip):
+        other_clip = other_clip.img
+
+    if isinstance(other_clip, np.ndarray):
+        return clip.fl_image(lambda f: np.maximum(f, other_clip))
+    else:
+        return clip.fl(lambda gf, t: np.maximum(gf(t),
+                                                other_clip.get_frame(t)))
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/mirror_x.html b/v1.0.3/_modules/moviepy/video/fx/mirror_x.html
new file mode 100644
index 000000000..ae0b36c87
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/mirror_x.html
@@ -0,0 +1,214 @@

Source code for moviepy.video.fx.mirror_x

+
+
[docs]def mirror_x(clip, apply_to="mask"):
+    """Flips the clip horizontally (and its mask too, by default)."""
+    return clip.fl_image(lambda f: f[:, ::-1], apply_to=apply_to)
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/mirror_y.html b/v1.0.3/_modules/moviepy/video/fx/mirror_y.html
new file mode 100644
index 000000000..606816d69
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/mirror_y.html
@@ -0,0 +1,213 @@

Source code for moviepy.video.fx.mirror_y

+
[docs]def mirror_y(clip, apply_to="mask"):
+    """Flips the clip vertically (and its mask too, by default)."""
+    return clip.fl_image(lambda f: f[::-1], apply_to=apply_to)
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/painting.html b/v1.0.3/_modules/moviepy/video/fx/painting.html
new file mode 100644
index 000000000..03ec39e65
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/painting.html
@@ -0,0 +1,253 @@

Source code for moviepy.video.fx.painting

+#------- CHECKING DEPENDENCIES ----------------------------------------- 
+painting_possible = True
+try:
+    from skimage.filter import sobel
+except:
+    try:
+        from scipy.ndimage.filters import sobel
+    except:
+        painting_possible = False
+#-----------------------------------------------------------------------    
+
+
+
+import numpy as np
+
+
+def to_painting(image,saturation = 1.4,black = 0.006):
+    """ transforms any photo into some kind of painting """
+    edges = sobel(image.mean(axis=2))
+    darkening =  black*(255*np.dstack(3*[edges]))
+    painting = saturation*image-darkening
+    return np.maximum(0,np.minimum(255,painting)).astype('uint8')
+    
+def painting(clip, saturation = 1.4,black = 0.006):
+    """
+    Transforms any photo into some kind of painting. Saturation
+    tells at which point the colors of the result should be
+    flashy. ``black`` gives the amount of black lines wanted.
+    Requires Scikit-image or Scipy installed.
+    """
+    return clip.fl_image(lambda im : to_painting(im,saturation,black))
+        
+
+
+#------- OVERWRITE IF REQUIREMENTS NOT MET -----------------------------
+
+if not painting_possible:
+    doc = painting.__doc__
+
[docs]    def painting(clip, saturation=1.4, black=0.006):
+        raise IOError("fx painting needs scikit-image or scipy")
+
+    painting.__doc__ = doc
+#-----------------------------------------------------------------------
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/resize.html b/v1.0.3/_modules/moviepy/video/fx/resize.html
new file mode 100644
index 000000000..14efab614
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/resize.html
@@ -0,0 +1,375 @@

Source code for moviepy.video.fx.resize

+resize_possible = True
+
+try:
+    # TRY USING OpenCV AS RESIZER
+    #raise ImportError #debugging
+    import cv2
+    import numpy as np
+    def resizer (pic, newsize):
+        lx, ly = int(newsize[0]), int(newsize[1])
+        if lx > pic.shape[1] or ly > pic.shape[0]:
+            # For upsizing use linear for good quality & decent speed
+            interpolation = cv2.INTER_LINEAR
+        else:
+            # For downsizing use area to prevent aliasing
+            interpolation = cv2.INTER_AREA
+        return cv2.resize(+pic.astype('uint8'), (lx, ly),
+                          interpolation=interpolation)
+
+    resizer.origin = "cv2"
+                
+except ImportError:
+    
+    
+    try:
+        # TRY USING PIL/PILLOW AS RESIZER
+        from PIL import Image
+        import numpy as np
+        def resizer(pic, newsize):
+            newsize = list(map(int, newsize))[::-1]
+            shape = pic.shape
+            if len(shape)==3:
+                newshape = (newsize[0],newsize[1], shape[2] )
+            else:
+                newshape = (newsize[0],newsize[1])
+                
+            pilim = Image.fromarray(pic)
+            resized_pil = pilim.resize(newsize[::-1], Image.ANTIALIAS)
+            #arr = np.fromstring(resized_pil.tostring(), dtype='uint8')
+            #arr.reshape(newshape)
+            return np.array(resized_pil)
+            
+        resizer.origin = "PIL"
+            
+    except ImportError:
+        # TRY USING SCIPY AS RESIZER
+        try:
+            from scipy.misc import imresize
+            resizer = lambda pic, newsize : imresize(pic,
+                                            map(int, newsize[::-1]))
+            resizer.origin = "Scipy"
+                                               
+        except ImportError:
+            resize_possible = False
+            
+        
+        
+    
+from moviepy.decorators import apply_to_mask
+
+
+def resize(clip, newsize=None, height=None, width=None, apply_to_mask=True):
+    """ 
+    Returns a video clip that is a resized version of the clip.
+    
+    Parameters
+    ------------
+    
+    newsize:
+      Can be either:
+        - ``(width,height)`` in pixels
+        - A scaling factor, like 0.5
+        - A function of time returning one of these.
+            
+    width:
+      width of the new clip in pixels. The height is then computed so
+      that the width/height ratio is conserved. 
+            
+    height:
+      height of the new clip in pixels. The width is then computed so
+      that the width/height ratio is conserved.
+    
+    Examples
+    ----------
+             
+    >>> myClip.resize( (460,720) ) # New resolution: (460,720)
+    >>> myClip.resize(0.6) # width and height multiplied by 0.6
+    >>> myClip.resize(width=800) # height computed automatically.
+    >>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
+    
+    """
+
+    w, h = clip.size
+    
+    if newsize is not None:
+        
+        def trans_newsize(ns):
+            
+            if isinstance(ns, (int, float)):
+                return [ns * w, ns * h]
+            else:
+                return ns
+                
+        if hasattr(newsize, "__call__"):
+            
+            newsize2 = lambda t : trans_newsize(newsize(t))
+            
+            if clip.ismask:
+                
+                fun = lambda gf,t: (1.0*resizer((255 * gf(t)).astype('uint8'),
+                                                 newsize2(t))/255)
+            else:
+                
+                fun = lambda gf,t: resizer(gf(t).astype('uint8'),
+                                          newsize2(t))
+                
+            return clip.fl(fun, keep_duration=True,
+                           apply_to= (["mask"] if apply_to_mask else []))
+            
+        else:
+            
+            newsize = trans_newsize(newsize)
+        
+
+    elif height is not None:
+        
+        if hasattr(height, "__call__"):
+            fun = lambda t : 1.0*int(height(t))/h
+            return resize(clip, fun)
+
+
+        else:
+
+            newsize = [w * height / h, height]
+        
+    elif width is not None:
+
+        if hasattr(width, "__call__"):
+            fun = lambda t : 1.0*width(t)/w
+            return resize(clip, fun)
+        
+        newsize = [width, h * width / w]
+        
+        
+    # From here, the resizing is constant (not a function of time), size=newsize
+
+    if clip.ismask:
+        fl = lambda pic: 1.0*resizer((255 * pic).astype('uint8'), newsize)/255.0
+            
+    else:
+        fl = lambda pic: resizer(pic.astype('uint8'), newsize)
+
+    newclip = clip.fl_image(fl)
+
+    if apply_to_mask and clip.mask is not None:
+        newclip.mask = resize(clip.mask, newsize, apply_to_mask=False)
+
+    return newclip
+
+
+if not resize_possible:
+    
+    doc = resize.__doc__
+
[docs]    def resize(clip, newsize=None, height=None, width=None):
+        raise ImportError("fx resize needs OpenCV or Scipy or PIL")
+
+    resize.__doc__ = doc
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/rotate.html b/v1.0.3/_modules/moviepy/video/fx/rotate.html
new file mode 100644
index 000000000..d77711dbf
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/rotate.html
@@ -0,0 +1,283 @@

Source code for moviepy.video.fx.rotate

+import numpy as np
+
+from moviepy.decorators import apply_to_mask
+
+try:
+    from PIL import Image
+    PIL_FOUND = True
+    def pil_rotater(pic, angle, resample, expand):
+        return np.array( Image.fromarray(pic).rotate(angle, expand=expand,
+                                                     resample=resample))
+except ImportError:
+    PIL_FOUND = False
+
+
[docs]def rotate(clip, angle, unit='deg', resample="bicubic", expand=True):
+    """
+    Rotates the clip by ``angle`` degrees (change unit to 'rad' to define
+    angles as radians).
+    If the angle is not one of 90, 180, -90, -180 (degrees) there will be
+    black borders. You can make them transparent with
+
+    >>> newclip = clip.add_mask().rotate(72)
+
+    Parameters
+    ===========
+
+    clip
+      A video clip
+
+    angle
+      Either a value or a function angle(t) representing the angle of rotation
+
+    unit
+      Unit of parameter `angle` (either `deg` for degrees or `rad` for radians)
+
+    resample
+      One of "nearest", "bilinear", or "bicubic".
+
+    expand
+      If False, the clip will maintain the same size as the original; if
+      True, the clip will be resized so that the whole rotated frame fits in it.
+    """
+
+    resample = {"bilinear": Image.BILINEAR,
+                "nearest": Image.NEAREST,
+                "bicubic": Image.BICUBIC}[resample]
+
+    if not hasattr(angle, '__call__'):
+        # if angle is a constant, convert to a constant function
+        a = +angle
+        angle = lambda t: a
+
+    transpo = [1, 0] if clip.ismask else [1, 0, 2]
+
+    def fl(gf, t):
+
+        a = angle(t)
+        im = gf(t)
+
+        if unit == 'rad':
+            a = 360.0 * a / (2 * np.pi)
+
+        # right-angle rotations are done with cheap numpy transpositions
+        if (a == 90) and expand:
+            return np.transpose(im, axes=transpo)[::-1]
+        elif (a == -90) and expand:
+            return np.transpose(im, axes=transpo)[:, ::-1]
+        elif (a in [180, -180]) and expand:
+            return im[::-1, ::-1]
+        elif not PIL_FOUND:
+            raise ValueError('Without "Pillow" installed, only angles 90, -90, '
+                             '180 are supported, please install "Pillow" with '
+                             "pip install pillow")
+        else:
+            return pil_rotater(im, a, resample=resample, expand=expand)
+
+    return clip.fl(fl, apply_to=["mask"])
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/scroll.html b/v1.0.3/_modules/moviepy/video/fx/scroll.html
new file mode 100644
index 000000000..a1f65d5b9
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/scroll.html
@@ -0,0 +1,226 @@

Source code for moviepy.video.fx.scroll

+
[docs]def scroll(clip, h=None, w=None, x_speed=0, y_speed=0,
+           x_start=0, y_start=0, apply_to="mask"):
+    """Scrolls a clip horizontally or vertically, e.g. to make end
+    credits."""
+    if h is None: h = clip.h
+    if w is None: w = clip.w
+
+    xmax = clip.w - w - 1
+    ymax = clip.h - h - 1
+
+    def f(gf, t):
+        # the visible window moves at (x_speed, y_speed) pixels per second
+        x = int(max(0, min(xmax, x_start + round(x_speed * t))))
+        y = int(max(0, min(ymax, y_start + round(y_speed * t))))
+        return gf(t)[y:y + h, x:x + w]
+
+    return clip.fl(f, apply_to=apply_to)
+
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/speedx.html b/v1.0.3/_modules/moviepy/video/fx/speedx.html
new file mode 100644
index 000000000..507daaa16
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/speedx.html
@@ -0,0 +1,231 @@

Source code for moviepy.video.fx.speedx

+from moviepy.decorators import apply_to_audio, apply_to_mask
+
+
+
[docs]def speedx(clip, factor=None, final_duration=None):
+    """
+    Returns a clip playing the current clip but at a speed multiplied
+    by ``factor``. Instead of factor one can indicate the desired
+    ``final_duration`` of the clip, and the factor will be automatically
+    computed.
+    The same effect is applied to the clip's audio and mask if any.
+    """
+
+    if final_duration:
+        factor = 1.0 * clip.duration / final_duration
+
+    newclip = clip.fl_time(lambda t: factor * t, apply_to=['mask', 'audio'])
+
+    if clip.duration is not None:
+        newclip = newclip.set_duration(1.0 * clip.duration / factor)
+
+    return newclip
+
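A minimal usage sketch (input file name hypothetical):

    from moviepy.editor import VideoFileClip, vfx

    clip = VideoFileClip("myvideo.mp4")  # hypothetical input
    double = clip.fx(vfx.speedx, 2)                   # twice as fast
    ten_sec = clip.fx(vfx.speedx, final_duration=10)  # compress into 10 s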
\ No newline at end of file
diff --git a/v1.0.3/_modules/moviepy/video/fx/supersample.html b/v1.0.3/_modules/moviepy/video/fx/supersample.html
new file mode 100644
index 000000000..a5c8d76e4
--- /dev/null
+++ b/v1.0.3/_modules/moviepy/video/fx/supersample.html
@@ -0,0 +1,223 @@

Source code for moviepy.video.fx.supersample

+import numpy as np
+
+
+
def supersample(clip, d, nframes):
    """ Replaces each frame at time t by the mean of `nframes` equally spaced
    frames taken in the interval [t-d, t+d]. This results in motion blur. """

    def fl(gf, t):
        tt = np.linspace(t - d, t + d, nframes)
        avg = np.mean(1.0 * np.array([gf(t_) for t_ in tt], dtype='uint16'),
                      axis=0)
        return avg.astype("uint8")

    return clip.fl(fl)
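A motion-blur sketch with illustrative values, assuming ``clip`` is any VideoClip: each output frame becomes the average of 5 frames sampled within 0.05 seconds around it.

>>> from moviepy.video.fx.supersample import supersample
>>> blurred = supersample(clip, d=0.05, nframes=5)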
diff --git a/v1.0.3/_modules/moviepy/video/fx/time_mirror.html b/v1.0.3/_modules/moviepy/video/fx/time_mirror.html
new file mode 100644
index 000000000..0d8eb4eb6
Source code for moviepy.video.fx.time_mirror

from moviepy.decorators import apply_to_audio, apply_to_mask, requires_duration
@requires_duration
@apply_to_mask
@apply_to_audio
def time_mirror(self):
    """
    Returns a clip that plays the current clip backwards.
    The clip must have its ``duration`` attribute set.
    The same effect is applied to the clip's audio and mask if any.
    """
    return self.fl_time(lambda t: self.duration - t, keep_duration=True)
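A usage sketch, assuming ``clip`` is a clip whose ``duration`` is set (e.g. a VideoFileClip):

>>> from moviepy.video.fx.time_mirror import time_mirror
>>> reversed_clip = clip.fx(time_mirror)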
diff --git a/v1.0.3/_modules/moviepy/video/fx/time_symmetrize.html b/v1.0.3/_modules/moviepy/video/fx/time_symmetrize.html
new file mode 100644
index 000000000..f25634c73
Source code for moviepy.video.fx.time_symmetrize

from moviepy.decorators import apply_to_audio, apply_to_mask, requires_duration
from moviepy.video.compositing.concatenate import concatenate_videoclips

from .time_mirror import time_mirror
@requires_duration
@apply_to_mask
def time_symmetrize(clip):
    """
    Returns a clip that plays the current clip once forwards and
    then once backwards. This is very practical to make videos that
    loop well, e.g. to create animated GIFs.
    This effect is automatically applied to the clip's mask and audio
    if they exist.
    """
    return concatenate_videoclips([clip, clip.fx(time_mirror)])
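A sketch of the typical GIF-loop use, assuming ``clip`` is a short clip with its duration set (the output file name is hypothetical):

>>> from moviepy.video.fx.time_symmetrize import time_symmetrize
>>> palindrome = clip.fx(time_symmetrize)
>>> palindrome.write_gif("loop.gif", fps=15)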
diff --git a/v1.0.3/_modules/moviepy/video/io/VideoFileClip.html b/v1.0.3/_modules/moviepy/video/io/VideoFileClip.html
new file mode 100644
index 000000000..5cd798585
Source code for moviepy.video.io.VideoFileClip

import os

from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.Clip import Clip
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
from moviepy.video.VideoClip import VideoClip
class VideoFileClip(VideoClip):
    """
    A video clip originating from a movie file. For instance: ::

        >>> clip = VideoFileClip("myHolidays.mp4")
        >>> clip.close()
        >>> with VideoFileClip("myMaskVideo.avi") as clip2:
        ...     pass  # Implicit close called by context manager.


    Parameters
    ------------

    filename:
      The name of the video file. It can have any extension supported
      by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov etc.

    has_mask:
      Set this to 'True' if there is a mask included in the videofile.
      Video files rarely contain masks, but some video codecs enable
      that. For instance if you have a MoviePy VideoClip with a mask you
      can save it to a videofile with a mask. (see also
      ``VideoClip.write_videofile`` for more details).

    audio:
      Set to `False` if the clip doesn't have any audio or if you do not
      wish to read the audio.

    target_resolution:
      Set to (desired_height, desired_width) to have ffmpeg resize the frames
      before returning them. This is much faster than streaming in high-res
      and then resizing. If either dimension is None, the frames are resized
      by keeping the existing aspect ratio.

    resize_algorithm:
      The algorithm used for resizing. Default: "bicubic", other popular
      options include "bilinear" and "fast_bilinear". For more information, see
      https://ffmpeg.org/ffmpeg-scaler.html

    fps_source:
      The fps value to collect from the metadata. Set by default to 'tbr', but
      can be set to 'fps', which may be helpful if importing slow-motion videos
      that get messed up otherwise.


    Attributes
    -----------

    filename:
      Name of the original video file.

    fps:
      Frames per second in the original file.


    Read docs for Clip() and VideoClip() for other, more generic, attributes.

    Lifetime
    --------

    Note that this creates subprocesses and locks files. If you construct one
    of these instances, you must call close() afterwards, or the subresources
    will not be cleaned up until the process ends.

    If copies are made, and close() is called on one, it may cause methods on
    the other copies to fail.
    """

    def __init__(self, filename, has_mask=False,
                 audio=True, audio_buffersize=200000,
                 target_resolution=None, resize_algorithm='bicubic',
                 audio_fps=44100, audio_nbytes=2, verbose=False,
                 fps_source='tbr'):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                         target_resolution=target_resolution,
                                         resize_algo=resize_algorithm,
                                         fps_source=fps_source)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = self.reader.filename

        if has_mask:
            self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
            mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
            self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                         .set_duration(self.duration))
            self.mask.fps = self.fps
        else:
            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:
            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
    def close(self):
        """ Close the internal reader. """
        if self.reader:
            self.reader.close()
            self.reader = None

        try:
            if self.audio:
                self.audio.close()
                self.audio = None
        except AttributeError:
            pass
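A usage sketch for the ``target_resolution`` parameter (the file name is hypothetical): ffmpeg decodes directly at 360 pixels high, keeping the aspect ratio, which is faster than reading full-size frames and resizing afterwards.

>>> small = VideoFileClip("myvideo.mp4", target_resolution=(360, None))
>>> small.close()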
diff --git a/v1.0.3/_modules/moviepy/video/io/ffmpeg_tools.html b/v1.0.3/_modules/moviepy/video/io/ffmpeg_tools.html
new file mode 100644
index 000000000..f0ca4a390
Source code for moviepy.video.io.ffmpeg_tools

""" Misc. bindings to ffmpeg and ImageMagick."""

import os
import subprocess as sp
import sys

from moviepy.config import get_setting
from moviepy.tools import subprocess_call
def ffmpeg_movie_from_frames(filename, folder, fps, digits=6, bitrate=3000):
    """
    Writes a movie out of the frames (picture files) in a folder.
    Almost deprecated.
    """
    s = "%" + "%02d" % digits + "d.png"  # e.g. '%06d.png'
    cmd = [get_setting("FFMPEG_BINARY"), "-y", "-f", "image2",
           "-r", "%d" % fps,
           "-i", os.path.join(folder, s),
           "-b", "%dk" % bitrate,
           "-r", "%d" % fps,
           filename]

    subprocess_call(cmd)

def ffmpeg_extract_subclip(filename, t1, t2, targetname=None):
    """ Makes a new video file playing video file ``filename`` between
    the times ``t1`` and ``t2``. """
    name, ext = os.path.splitext(filename)
    if not targetname:
        T1, T2 = [int(1000 * t) for t in [t1, t2]]
        targetname = "%sSUB%d_%d%s" % (name, T1, T2, ext)  # ext includes the dot

    cmd = [get_setting("FFMPEG_BINARY"), "-y",
           "-ss", "%0.2f" % t1,
           "-i", filename,
           "-t", "%0.2f" % (t2 - t1),
           "-map", "0", "-vcodec", "copy", "-acodec", "copy", targetname]

    subprocess_call(cmd)

def ffmpeg_merge_video_audio(video, audio, output, vcodec='copy',
                             acodec='copy', ffmpeg_output=False,
                             logger='bar'):
    """ Merges video file ``video`` and audio file ``audio`` into one
    movie file ``output``. """
    cmd = [get_setting("FFMPEG_BINARY"), "-y", "-i", audio, "-i", video,
           "-vcodec", vcodec, "-acodec", acodec, output]

    subprocess_call(cmd, logger=logger)

def ffmpeg_extract_audio(inputfile, output, bitrate=3000, fps=44100):
    """ Extracts the sound from a video file and saves it in ``output``. """
    cmd = [get_setting("FFMPEG_BINARY"), "-y", "-i", inputfile,
           "-ab", "%dk" % bitrate, "-ar", "%d" % fps, output]
    subprocess_call(cmd)

def ffmpeg_resize(video, output, size):
    """ Resizes ``video`` to new size ``size`` and writes the result
    in file ``output``. """
    cmd = [get_setting("FFMPEG_BINARY"), "-i", video,
           "-vf", "scale=%d:%d" % (size[0], size[1]),
           output]

    subprocess_call(cmd)
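A quick sketch of the most commonly used binding (file names are hypothetical). Since the codecs are copied, the cut is very fast but only accurate to the nearest keyframe:

>>> from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
>>> ffmpeg_extract_subclip("full.mp4", 60, 90, targetname="cut.mp4")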
diff --git a/v1.0.3/_modules/moviepy/video/tools/credits.html b/v1.0.3/_modules/moviepy/video/tools/credits.html
new file mode 100644
index 000000000..f7f56ac62
Source code for moviepy.video.tools.credits

"""
This module contains different functions to make end and opening
credits, even though it is difficult to fill everyone's needs in this
matter.
"""

from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.fx.resize import resize
from moviepy.video.VideoClip import ImageClip, TextClip
def credits1(creditfile, width, stretch=30, color='white', stroke_color='black',
             stroke_width=2, font='Impact-Normal', fontsize=60, gap=0):
    """

    Parameters
    -----------

    creditfile
      A text file whose content must be as follows: ::

        # This is a comment
        # The next line says : leave 4 blank lines
        .blank 4

        ..Executive Story Editor
        MARCEL DURAND

        ..Associate Producers
        MARTIN MARCEL
        DIDIER MARTIN

        ..Music Supervisor
        JEAN DIDIER

    width
      Total width of the credits text in pixels

    gap
      Horizontal gap in pixels between the jobs and the names

    color
      Color of the text. See ``TextClip.list('color')``
      for a list of acceptable names.

    font
      Name of the font to use. See ``TextClip.list('font')`` for
      the list of fonts you can use on your computer.

    fontsize
      Size of font to use

    stroke_color
      Color of the stroke (=contour line) of the text. If ``None``,
      there will be no stroke.

    stroke_width
      Width of the stroke, in pixels. Can be a float, like 1.5.


    Returns
    ---------

    image
      An ImageClip instance that looks like this and can be scrolled
      to make some credits:

        Executive Story Editor    MARCEL DURAND
           Associate Producers    MARTIN MARCEL
                                  DIDIER MARTIN
              Music Supervisor    JEAN DIDIER

    """

    # PARSE THE TXT FILE
    texts = []
    oneline = True

    with open(creditfile) as f:
        for l in f:
            if l.startswith(('\n', '#')):
                # exclude blank lines or comments
                continue
            elif l.startswith('.blank'):
                # '.blank n' means: insert n blank lines
                for i in range(int(l.split(' ')[1])):
                    texts.append(['\n', '\n'])
            elif l.startswith('..'):
                texts.append([l[2:], ''])
                oneline = True
            elif oneline:
                texts.append(['', l])
                oneline = False
            else:
                texts.append(['\n', l])

    left, right = ("".join(l) for l in zip(*texts))

    # MAKE TWO COLUMNS FOR THE CREDITS
    left, right = [TextClip(txt, color=color, stroke_color=stroke_color,
                            stroke_width=stroke_width, font=font,
                            fontsize=fontsize, align=al)
                   for txt, al in [(left, 'East'), (right, 'West')]]

    cc = CompositeVideoClip([left, right.set_position((left.w + gap, 0))],
                            size=(left.w + right.w + gap, right.h),
                            bg_color=None)

    # SCALE TO THE REQUIRED SIZE
    scaled = resize(cc, width=width)

    # TRANSFORM THE WHOLE CREDIT CLIP INTO AN ImageClip
    imclip = ImageClip(scaled.get_frame(0))
    amask = ImageClip(scaled.mask.get_frame(0), ismask=True)

    return imclip.set_mask(amask)
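A sketch combining ``credits1`` with the ``scroll`` effect shown earlier (the credit file name is hypothetical):

>>> from moviepy.video.tools.credits import credits1
>>> from moviepy.video.fx.scroll import scroll
>>> credits = credits1("credits.txt", width=600).set_duration(20)
>>> scrolling = scroll(credits, h=400, y_speed=30)  # assumes the rendered credits are taller than 400 px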
diff --git a/v1.0.3/_modules/moviepy/video/tools/drawing.html b/v1.0.3/_modules/moviepy/video/tools/drawing.html
new file mode 100644
index 000000000..12f34fe42
Source code for moviepy.video.tools.drawing

"""
This module deals with making images (np arrays). It provides drawing
methods that are difficult to do with the existing Python libraries.
"""

import numpy as np
def blit(im1, im2, pos=None, mask=None, ismask=False):
    """ Blit an image over another.

    Blits ``im1`` on ``im2`` at position ``pos=(x,y)``, using the
    ``mask`` if provided. If ``im1`` and ``im2`` are mask pictures
    (2D float arrays) then ``ismask`` must be ``True``.
    """
    if pos is None:
        pos = [0, 0]

    # xp1,yp1,xp2,yp2 = blit area on im2
    # x1,y1,x2,y2 = area of im1 to blit on im2
    xp, yp = pos
    x1 = max(0, -xp)
    y1 = max(0, -yp)
    h1, w1 = im1.shape[:2]
    h2, w2 = im2.shape[:2]
    xp2 = min(w2, xp + w1)
    yp2 = min(h2, yp + h1)
    x2 = min(w1, w2 - xp)
    y2 = min(h1, h2 - yp)
    xp1 = max(0, xp)
    yp1 = max(0, yp)

    if (xp1 >= xp2) or (yp1 >= yp2):
        return im2

    blitted = im1[y1:y2, x1:x2]

    new_im2 = +im2  # unary plus returns a copy of the array

    if mask is None:
        new_im2[yp1:yp2, xp1:xp2] = blitted
    else:
        mask = mask[y1:y2, x1:x2]
        if len(im1.shape) == 3:
            mask = np.dstack(3 * [mask])
        blit_region = new_im2[yp1:yp2, xp1:xp2]
        new_im2[yp1:yp2, xp1:xp2] = (1.0 * mask * blitted
                                     + (1.0 - mask) * blit_region)

    return new_im2.astype('uint8') if (not ismask) else new_im2

def color_gradient(size, p1, p2=None, vector=None, r=None, col1=0, col2=1.0,
                   shape='linear', offset=0):
    """Draw a linear, bilinear, or radial gradient.

    The result is a picture of size ``size``, whose color varies
    gradually from color `col1` in position ``p1`` to color ``col2``
    in position ``p2``.

    If it is a RGB picture the result must be transformed into
    a 'uint8' array to be displayed normally.


    Parameters
    ------------

    size
        Size (width, height) in pixels of the final picture/array.

    p1, p2
        Coordinates (x,y) in pixels of the limit point for ``col1``
        and ``col2``. The color 'before' ``p1`` is ``col1`` and it
        gradually changes in the direction of ``p2`` until it is ``col2``
        when it reaches ``p2``.

    vector
        A vector [x,y] in pixels that can be provided instead of ``p2``.
        ``p2`` is then defined as (p1 + vector).

    col1, col2
        Either floats between 0 and 1 (for gradients used in masks)
        or [R,G,B] arrays (for colored gradients).

    shape
        'linear', 'bilinear', or 'radial'.
        In a linear gradient the color varies in one direction,
        from point ``p1`` to point ``p2``.
        In a bilinear gradient it also varies symmetrically from ``p1``
        in the other direction.
        In a radial gradient it goes from ``col1`` to ``col2`` in all
        directions.

    offset
        Real number between 0 and 1 indicating the fraction of the vector
        at which the gradient actually starts. For instance if ``offset``
        is 0.9 in a gradient going from p1 to p2, then the gradient will
        only occur near p2 (before that everything is of color ``col1``).
        If the offset is 0.9 in a radial gradient, the gradient will
        occur in the region located between 90% and 100% of the radius;
        this creates a blurry disc of radius d(p1,p2).

    Returns
    --------

    image
        A Numpy array of dimensions (W,H,ncolors) of type float
        representing the image of the gradient.


    Examples
    ---------

    >>> grad = color_gradient(blabla).astype('uint8')

    """

    # np-arrayize and change x,y coordinates to y,x
    w, h = size

    col1 = np.array(col1).astype(float)
    col2 = np.array(col2).astype(float)

    if shape == 'bilinear':
        if vector is None:
            vector = np.array(p2) - np.array(p1)

        m1, m2 = [color_gradient(size, p1, vector=v, col1=1.0, col2=0,
                                 shape='linear', offset=offset)
                  for v in [vector, -vector]]

        arr = np.maximum(m1, m2)
        if col1.size > 1:
            arr = np.dstack(3 * [arr])
        return arr * col1 + (1 - arr) * col2

    p1 = np.array(p1[::-1]).astype(float)

    if vector is None:
        if p2 is not None:
            p2 = np.array(p2[::-1])
            vector = p2 - p1
    else:
        vector = np.array(vector[::-1])
        p2 = p1 + vector

    if vector is not None:
        norm = np.linalg.norm(vector)

    M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)

    if shape == 'linear':

        n_vec = vector / norm ** 2  # vector of norm 1/norm(vector)

        p1 = p1 + offset * vector
        arr = (M - p1).dot(n_vec) / (1 - offset)
        arr = np.minimum(1, np.maximum(0, arr))
        if col1.size > 1:
            arr = np.dstack(3 * [arr])
        return arr * col1 + (1 - arr) * col2

    elif shape == 'radial':
        if r is None:
            r = norm

        if r == 0:
            arr = np.ones((h, w))
        else:
            arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * r
            arr = arr / ((1 - offset) * r)
            arr = np.minimum(1.0, np.maximum(0, arr))

        if col1.size > 1:
            arr = np.dstack(3 * [arr])
        return (1 - arr) * col1 + arr * col2

def color_split(size, x=None, y=None, p1=None, p2=None, vector=None,
                col1=0, col2=1.0, grad_width=0):
    """Make an image split into 2 colored regions.

    Returns an array of size ``size`` divided in two regions called 1 and
    2 in what follows, and which will have colors ``col1`` and ``col2``
    respectively.

    Parameters
    -----------

    x: (int)
        If provided, the image is split horizontally in x, the left
        region being region 1.

    y: (int)
        If provided, the image is split vertically in y, the top region
        being region 1.

    p1,p2:
        Positions (x1,y1),(x2,y2) in pixels, where the numbers can be
        floats. Region 1 is defined as the whole region on the left when
        going from ``p1`` to ``p2``.

    p1, vector:
        ``p1`` is (x1,y1) and vector (v1,v2), where the numbers can be
        floats. Region 1 is then the region on the left when starting
        in position ``p1`` and going in the direction given by ``vector``.

    grad_width
        If not zero, the split is not sharp, but gradual over a region of
        width ``grad_width`` (in pixels). This is preferable in many
        situations (for instance for antialiasing).


    Examples
    ---------

    >>> size = [200,200]
    >>> # an image with all pixels with x<50 =0, the others =1
    >>> color_split(size, x=50, col1=0, col2=1)
    >>> # an image with all pixels with y<50 red, the others green
    >>> color_split(size, y=50, col1=[255,0,0], col2=[0,255,0])
    >>> # An image split along an arbitrary line (see below)
    >>> color_split(size, p1=[20,50], p2=[25,70], col1=0, col2=1)

    """

    if grad_width or ((x is None) and (y is None)):
        if p2 is not None:
            vector = (np.array(p2) - np.array(p1))
        elif x is not None:
            vector = np.array([0, -1.0])
            p1 = np.array([x, 0])
        elif y is not None:
            vector = np.array([1.0, 0.0])
            p1 = np.array([0, y])
        else:
            # we didn't get enough information to compute a split
            raise ValueError("Arguments in color_split not understood!")

        x, y = vector
        vector = np.array([y, -x]).astype('float')
        norm = np.linalg.norm(vector)
        vector = max(0.1, grad_width) * vector / norm
        return color_gradient(size, p1, vector=vector,
                              col1=col1, col2=col2, shape='linear')
    else:
        w, h = size
        shape = (h, w) if np.isscalar(col1) else (h, w, len(col1))
        arr = np.zeros(shape)
        if x:
            arr[:, :x] = col1
            arr[:, x:] = col2
        elif y:
            arr[:y] = col1
            arr[y:] = col2
        return arr

def circle(screensize, center, radius, col1=1.0, col2=0, blur=1):
    """ Draw an image with a circle.

    Draws a circle of color ``col1``, on a background of color ``col2``,
    on a screen of size ``screensize`` at the position ``center=(x,y)``,
    with a radius ``radius`` but slightly blurred on the border by ``blur``
    pixels.
    """
    offset = 1.0 * (radius - blur) / radius if radius else 0
    return color_gradient(screensize, p1=center, r=radius, col1=col1,
                          col2=col2, shape='radial', offset=offset)
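A small sketch showing the conventions: ``screensize`` is (width, height), while the returned Numpy array is indexed (height, width).

>>> from moviepy.video.tools.drawing import circle
>>> mask = circle((640, 360), center=(320, 180), radius=100, blur=4)
>>> mask.shape
(360, 640)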
diff --git a/v1.0.3/_modules/moviepy/video/tools/subtitles.html b/v1.0.3/_modules/moviepy/video/tools/subtitles.html
new file mode 100644
index 000000000..6fc246b1c
Source code for moviepy.video.tools.subtitles

""" Experimental module for subtitles support. """

import re

import numpy as np

from moviepy.tools import cvsecs
from moviepy.video.VideoClip import TextClip, VideoClip
class SubtitlesClip(VideoClip):
    """ A Clip that serves as "subtitle track" in videos.

    One particularity of this class is that the images of the
    subtitle texts are not generated beforehand, but only if
    needed.

    Parameters
    ==========

    subtitles
      Either the name of a file, or a list

    Examples
    =========

    >>> from moviepy.video.tools.subtitles import SubtitlesClip
    >>> from moviepy.video.io.VideoFileClip import VideoFileClip
    >>> generator = lambda txt: TextClip(txt, font='Georgia-Regular', fontsize=24, color='white')
    >>> sub = SubtitlesClip("subtitles.srt", generator)
    >>> myvideo = VideoFileClip("myvideo.avi")
    >>> final = CompositeVideoClip([myvideo, sub])
    >>> final.write_videofile("final.mp4", fps=myvideo.fps)

    """

    def __init__(self, subtitles, make_textclip=None):

        VideoClip.__init__(self, has_constant_size=False)

        if isinstance(subtitles, str):
            subtitles = file_to_subtitles(subtitles)

        # subtitles = [(map(cvsecs, tt), txt) for tt, txt in subtitles]
        self.subtitles = subtitles
        self.textclips = dict()

        if make_textclip is None:
            make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                                 fontsize=24, color='white',
                                                 stroke_color='black',
                                                 stroke_width=0.5)

        self.make_textclip = make_textclip
        self.start = 0
        self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
        self.end = self.duration

        def add_textclip_if_none(t):
            """ Will generate a textclip if it hasn't been generated yet.
            If there is no subtitle to show at t, return False. """
            sub = [((ta, tb), txt) for ((ta, tb), txt) in self.textclips.keys()
                   if (ta <= t < tb)]
            if not sub:
                sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                       if (ta <= t < tb)]
                if not sub:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])

            return sub

        def make_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].get_frame(t) if sub
                    else np.array([[[0, 0, 0]]]))

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return (self.textclips[sub].mask.get_frame(t) if sub
                    else np.array([[0]]))

        self.make_frame = make_frame
        hasmask = bool(self.make_textclip('T').mask)
        self.mask = VideoClip(make_mask_frame, ismask=True) if hasmask else None
    def in_subclip(self, t_start=None, t_end=None):
        """ Returns a sequence of [(t1,t2), txt] covering all the given subclip
        from t_start to t_end. The first and last times will be cropped so as
        to be exactly t_start and t_end if possible. """

        def is_in_subclip(t1, t2):
            try:
                return (t_start <= t1 < t_end) or (t_start < t2 <= t_end)
            except TypeError:  # t_start or t_end is None
                return False

        def try_cropping(t1, t2):
            try:
                return (max(t1, t_start), min(t2, t_end))
            except TypeError:
                return (t1, t2)

        return [(try_cropping(t1, t2), txt)
                for ((t1, t2), txt) in self.subtitles
                if is_in_subclip(t1, t2)]
    def __iter__(self):
        return iter(self.subtitles)

    def __getitem__(self, k):
        return self.subtitles[k]

    def __str__(self):

        def to_srt(sub_element):
            (ta, tb), txt = sub_element
            fta = cvsecs(ta)
            ftb = cvsecs(tb)
            return "%s - %s\n%s" % (fta, ftb, txt)

        return "\n\n".join(to_srt(s) for s in self.subtitles)

    def match_expr(self, expr):
        return SubtitlesClip([e for e in self.subtitles
                              if re.findall(expr, e[1]) != []])

    def write_srt(self, filename):
        with open(filename, 'w+') as f:
            f.write(str(self))

def file_to_subtitles(filename):
    """ Converts a srt file into subtitles.

    The returned list is of the form ``[((ta,tb),'some text'),...]``
    and can be fed to SubtitlesClip.

    Only works for '.srt' format for the moment.
    """
    times_texts = []
    current_times = None
    current_text = ""
    with open(filename, 'r') as f:
        for line in f:
            times = re.findall("([0-9]*:[0-9]*:[0-9]*,[0-9]*)", line)
            if times:
                current_times = [cvsecs(t) for t in times]
            elif line.strip() == '':
                times_texts.append((current_times, current_text.strip('\n')))
                current_times, current_text = None, ""
            elif current_times:
                current_text += line
    return times_texts
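A sketch of the parsing output (the .srt file and its timings are hypothetical): times are converted to seconds by ``cvsecs``.

>>> subs = file_to_subtitles("movie.srt")
>>> subs[0]   # ([start_in_seconds, end_in_seconds], text)
([0.5, 3.0], 'Hello world!')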
diff --git a/_sources/getting_started/FAQ.rst.txt b/v1.0.3/_sources/FAQ.rst.txt
similarity index 78%
rename from _sources/getting_started/FAQ.rst.txt
rename to v1.0.3/_sources/FAQ.rst.txt
index 56903801e..6eaadf980 100644

FAQ and troubleshooting
=========================

This section will fill up as MoviePy advances through the next steps of
development (currently on the roadmap: MoviePy Studio, MoviePy WebApp,
MoviePy OS, MoviePy Trust Inc., and the MoviePy Charity Foundation).

Common errors that are not bugs
--------------------------------

These are very common errors which are not considered as bugs to be
solved (but you can still ask for this to change). If these answers
don't work for you, please open a bug report on Github_, on the dedicated forum on Reddit_, or on the librelist_.

MoviePy generated a video that cannot be read by my favorite player.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""

Known reason: one of the video's dimensions was not even,
for instance 720x405, and you used an MPEG4 codec like libx264 (default
in MoviePy). In this case the generated video uses a format that is
readable only by some players like VLC.

I can't seem to read any video with MoviePy
""""""""""""""""""""""""""""""""""""""""""""""

Known reason: you have a deprecated version of FFMPEG. Install a recent version from the
website, not from your OS's repositories! (see :ref:`install`).

Previewing videos makes them slower than they are
"""""""""""""""""""""""""""""""""""""""""""""""""

It means that your computer is not good enough to render the clip in real time.

.. _Github: https://github.com/Zulko/moviepy
.. _Reddit: https://www.reddit.com/r/moviepy/
.. _librelist: mailto:moviepy@librelist.com

diff --git a/v1.0.3/_sources/advanced_tools/advanced_tools.rst.txt b/v1.0.3/_sources/advanced_tools/advanced_tools.rst.txt
new file mode 100644
index 000000000..4763b6803

.. _advancedtools:

Advanced tools
===============

This section will briefly present the submodule ``moviepy.video.tools`` that can help you edit videos. It's not ready yet; see :ref:`ref_videotools` (the same, but more complete and more technical) instead.

Tracking
~~~~~~~~~

Cuts
~~~~~~~~

Subtitles
~~~~~~~~~~

Credits
~~~~~~~~

diff --git a/_sources/getting_started/docker.rst.txt b/v1.0.3/_sources/docker.rst.txt
similarity index 64%
rename from _sources/getting_started/docker.rst.txt
rename to v1.0.3/_sources/docker.rst.txt
index 8b983c20a..7d5033235 100644

MoviePy Docker
===============

Prerequisites
-------------

1. Docker installed (`Docker for Mac, Docker for Windows, Linux, etc. `_)

2. Build the Dockerfile ::

   docker build -t moviepy -f Dockerfile .
Steps to run the git repo unittests from docker
------------------------------------------------

Get a bash prompt in the moviepy container ::

   cd tests
   docker run -it -v `pwd`:/tests moviepy bash

Run the tests ::

   cd tests
   python test_issues.py

Running your own moviepy script from docker
--------------------------------------------

diff --git a/v1.0.3/_sources/examples/compo_from_image.rst.txt b/v1.0.3/_sources/examples/compo_from_image.rst.txt
new file mode 100644
index 000000000..c6f31fd08

======================================
Placing clips according to a picture
======================================


So how do you do some complex compositing like this?

.. raw:: html
+ +It takes a lot of bad taste, and a segmenting tool + +In this script we will use this image (generated with Inkscape): + +.. figure:: compo_from_image.jpeg + +We will find the regions of this image and fit the different clips into these regions: + +.. literalinclude:: ../../examples/compo_from_image.py + + +(note that some pictures are distorted here as their size has been modified without care for their aspect ratio. This could be changed with a few more lines.) diff --git a/v1.0.3/_sources/examples/dancing_knights.rst.txt b/v1.0.3/_sources/examples/dancing_knights.rst.txt new file mode 100644 index 000000000..d762a1744 --- /dev/null +++ b/v1.0.3/_sources/examples/dancing_knights.rst.txt @@ -0,0 +1,16 @@ +========================================== +A reconstitution of 15th century dancing +========================================== + +And now for something very silly... + +.. raw:: html + +
+ +
+ +.. literalinclude:: ../../examples/dancing_knights.py diff --git a/v1.0.3/_sources/examples/example_with_sound.rst.txt b/v1.0.3/_sources/examples/example_with_sound.rst.txt new file mode 100644 index 000000000..5dcb3b2c6 --- /dev/null +++ b/v1.0.3/_sources/examples/example_with_sound.rst.txt @@ -0,0 +1,14 @@ +.. soundexample: + +An example with sound +------------------------ + +An example of using MoviePy to assemble movie clips with sounds. Here are two scenes of Charade put together: + +.. raw:: html + +
+ +Here is the code: + +.. literalinclude:: ../../examples/example_with_sound.py diff --git a/v1.0.3/_sources/examples/examples.rst.txt b/v1.0.3/_sources/examples/examples.rst.txt new file mode 100644 index 000000000..98a9a5a5f --- /dev/null +++ b/v1.0.3/_sources/examples/examples.rst.txt @@ -0,0 +1,28 @@ +.. _examples: + +Example Scripts +=============== + +Here are a few example scripts to get you started. Most are quite old now and will be soon replaced. + + +.. toctree:: + :maxdepth: 1 + + moving_letters + dancing_knights + ukulele_concerto + example_with_sound + star_worms + masked_credits + painting_effect + compo_from_image + logo + headblur + quick_recipes + several_characters + the_end + + + + diff --git a/v1.0.3/_sources/examples/headblur.rst.txt b/v1.0.3/_sources/examples/headblur.rst.txt new file mode 100644 index 000000000..1330234c6 --- /dev/null +++ b/v1.0.3/_sources/examples/headblur.rst.txt @@ -0,0 +1,12 @@ +======================================== +Tracking and blurring someone's face +======================================== + +.. raw:: html + +
+ +First we will need to track the face, i.e. to get two functions ``fx`` and ``fy`` such that ``(fx(t),fy(t))`` gives the position of the center of the head at time ``t``. This will be easily done with +`manual_tracking`. Then we will need to blur the area of the video around the center of the head. + +.. literalinclude:: ../../examples/headblur.py diff --git a/v1.0.3/_sources/examples/logo.rst.txt b/v1.0.3/_sources/examples/logo.rst.txt new file mode 100644 index 000000000..a961a5463 --- /dev/null +++ b/v1.0.3/_sources/examples/logo.rst.txt @@ -0,0 +1,8 @@ +================================= +MoviePy logo with a moving shadow +================================= +.. raw:: html + +
+ +Here the logo is a picture, while the shadow is actually a black rectangle taking the whole screen, overlaid over the logo, but with a moving mask composed of a bi-gradient, such that only one (moving) part of the rectangle is visible. diff --git a/v1.0.3/_sources/examples/masked_credits.rst.txt b/v1.0.3/_sources/examples/masked_credits.rst.txt new file mode 100644 index 000000000..92519d343 --- /dev/null +++ b/v1.0.3/_sources/examples/masked_credits.rst.txt @@ -0,0 +1,19 @@ +.. _mountainMask: + +Partially Hidden credits +----------------------------------- + +.. raw:: html + +
+ +Before seeing the code for this video, here is a tutorial video that explains the different steps (also made with MoviePy): + +.. raw:: html + +
+ + +And here is the code: + +.. literalinclude:: ../../examples/masked_credits.py diff --git a/v1.0.3/_sources/examples/moving_letters.rst.txt b/v1.0.3/_sources/examples/moving_letters.rst.txt new file mode 100644 index 000000000..4654a9121 --- /dev/null +++ b/v1.0.3/_sources/examples/moving_letters.rst.txt @@ -0,0 +1,13 @@ +============================= +Text with moving letters +============================= + +I think this example illustrates well the interest of script-based editing (imagine doing that by hand). + +.. raw:: html + +
+ +Here is the code: + +.. literalinclude:: ../../examples/moving_letters.py diff --git a/v1.0.3/_sources/examples/painting_effect.rst.txt b/v1.0.3/_sources/examples/painting_effect.rst.txt new file mode 100644 index 000000000..638f2c2a6 --- /dev/null +++ b/v1.0.3/_sources/examples/painting_effect.rst.txt @@ -0,0 +1,29 @@ +============================================= +Freezing a movie frame with a painting effect +============================================= + +That's an effect that we have seen a lot in westerns and such. + +.. raw:: html + +


The recipe used to make a photo look like a painting:

- Find the edges of the image with the Sobel algorithm. You obtain
  what looks like a black and white hand-drawing of the photo.
- Multiply the image array to make the colors flashier, and add the contours
  obtained at the previous step.

The final clip will be the concatenation of three parts: the part before
the effect, the part with the effect, and the part after the effect.
The part with the effect is obtained as follows:

- Take the frame to freeze and make a "painted image" of it. Make it a clip.
- Add a text clip saying "Audrey" to the "painted image" clip.
- Overlay the painted clip over the original frame, but make it appear and
  disappear with a fading effect.

Here is the code:

.. literalinclude:: ../../examples/painting_effect.py

diff --git a/v1.0.3/_sources/examples/quick_recipes.rst.txt b/v1.0.3/_sources/examples/quick_recipes.rst.txt
new file mode 100644
index 000000000..eeaee635b

Quick recipes
===============



Effects and filters
---------------------

Blurring all frames of a video
"""""""""""""""""""""""""""""""

::

    from skimage.filters import gaussian_filter
    from moviepy.editor import VideoFileClip

    def blur(image):
        """ Returns a blurred (radius=2 pixels) version of the image """
        return gaussian_filter(image.astype(float), sigma=2)

    clip = VideoFileClip("my_video.mp4")
    clip_blurred = clip.fl_image( blur )
    clip_blurred.write_videofile("blurred_video.mp4")



Cutting videos
---------------

Scene detection
----------------


Compositing videos
-------------------

Add a title before a video
"""""""""""""""""""""""""""

(see the sketch at the end of this page)


Art of Gif-making
-------------------

::

    clip.fx( vfx.time_symmetrize )

    # find a subclip
    T = clip

Useless but fun
----------------


Getting the average frame of a video
"""""""""""""""""""""""""""""""""""""
::

    from moviepy.editor import VideoFileClip, ImageClip
    clip = VideoFileClip("video.mp4")
    fps= 1.0 # take one frame per second
    nframes = clip.duration*fps # total number of frames used
    total_image = sum(clip.iter_frames(fps, dtype=float, logger='bar'))
    average_image = ImageClip(total_image/ nframes)
    average_image.save_frame("average_test.png")
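A sketch for the "Add a title before a video" recipe above, under the usual ``moviepy.editor`` imports (file names are hypothetical): the title is rendered on a black background of the video's size, then concatenated before it. ::

    from moviepy.editor import *

    video = VideoFileClip("my_video.mp4")
    title = (TextClip("My Title", fontsize=70, color='white')
             .set_duration(3)
             .on_color(size=video.size, color=(0, 0, 0)))
    final = concatenate_videoclips([title, video])
    final.write_videofile("titled_video.mp4", fps=video.fps)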
+ +So blabla diff --git a/v1.0.3/_sources/examples/star_worms.rst.txt b/v1.0.3/_sources/examples/star_worms.rst.txt new file mode 100644 index 000000000..7a41e3a2d --- /dev/null +++ b/v1.0.3/_sources/examples/star_worms.rst.txt @@ -0,0 +1,16 @@ +A Star-Wars like opening title +------------------------------- + +This is an approximate effect (the perspective would require some more complex transformations) but it is a nice exercise. + +Warning: clip with sound. + +Let us also have a look at this tutorial which shows the different steps: + +.. raw:: html + +
+ +And here you are for the code, and for the code of the tutorial. + +.. literalinclude:: ../../examples/star_worms.py diff --git a/v1.0.3/_sources/examples/the_end.rst.txt b/v1.0.3/_sources/examples/the_end.rst.txt new file mode 100644 index 000000000..e093b0579 --- /dev/null +++ b/v1.0.3/_sources/examples/the_end.rst.txt @@ -0,0 +1,16 @@ +====================== +"The End" effect +====================== + +.. raw:: html + +


So let's explain this one: there is a clip with "The End" written in the middle, and *above* this
clip there is the actual movie. The actual movie has a mask which represents
a white (=opaque) circle on a black (=transparent) background. At the beginning,
that circle is so large that you see all of the actual movie and you don't see
the "The End" clip. Then the circle becomes progressively smaller and as a
consequence you see less of the actual movie and more of the "The End" clip.

.. literalinclude:: ../../examples/the_end.py

diff --git a/v1.0.3/_sources/examples/ukulele_concerto.rst.txt b/v1.0.3/_sources/examples/ukulele_concerto.rst.txt
new file mode 100644
index 000000000..d5ec1b90a

======================
A simple music video
======================

.. raw:: html


This is an example, with no sound (lame for a music video), soon to be
replaced with a real music video example (the code will be 99% the same).
The philosophy of MoviePy is that for each new music video I will make,
I will just have to copy/paste this code, and modify a few lines.

.. literalinclude:: ../../examples/ukulele_concerto.py

diff --git a/v1.0.3/_sources/gallery.rst.txt b/v1.0.3/_sources/gallery.rst.txt
new file mode 100644
index 000000000..2c5827b62

.. _gallery:


Gallery
========

Here are a few projects using MoviePy. The gallery will fill up as more people start using MoviePy (which is currently one year old). If you have a nice project using MoviePy, let us know!

Videos edited with MoviePy
---------------------------


The Cup Song Covers Mix
~~~~~~~~~~~~~~~~~~~~~~~~

This mix of 60 covers of the Cup Song demonstrates the non-linear video editing capabilities of MoviePy. Here is `the (undocumented) MoviePy code `_ that generated the video.

.. raw:: html
+ +
+ +The (old) MoviePy reel video. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Made when MoviePy was a few weeks old and not as good as now. The code for most scenes can be found +in the :ref:`examples`. + +.. raw:: html + +
+ +


Animations edited with MoviePy
------------------------------


GIFs made from videos
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This `gifs tutorial
`_ gives you the basics to make gifs from video files (cutting, cropping, adding text...). The last example shows how to remove a (still) background to keep only the animated part of a video.


.. raw:: html



Vector Animations
~~~~~~~~~~~~~~~~~~~

This `vector animations tutorial `_ shows how to combine MoviePy with Gizeh to create animations:

.. raw:: html



It is also possible to combine MoviePy with other graphic libraries like matplotlib, etc.


3D animations
~~~~~~~~~~~~~~~~~~~

This `3d animation tutorial `_ shows how to combine MoviePy with Vapory, a library to render 3D scenes using the free ray-tracer POV-Ray.

.. raw:: html



With Vapory and MoviePy you can for instance embed a movie in a 3D scene:


.. raw:: html
+ +
+ + +Or render the result of this physics simulation made with PyODE (`script `_): + +.. raw:: html + + + + +Or use `this script `_ to make piano animations from MIDI files (which are some sort of electronic sheet music): + + +.. raw:: html + +
+ +


Data animations
----------------

This `data animation tutorial `_ shows how to use MoviePy to animate the different Python visualization libraries: Mayavi, Vispy, Scikit-image, Matplotlib, etc.


Scientific or technological projects
-------------------------------------


Piano rolls transcription to sheet music
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This `transcribing piano rolls blog post `_ explains how to transform a video of a piano roll performance into playable sheet music. MoviePy is used for the frame-by-frame analysis of the piano roll video. The last video is also edited with MoviePy:

.. raw:: html
+ +


Misc. Programs and Scripts using MoviePy
------------------------------------------
Kapwing
----------

`Kapwing `_ is an online video meme generator. Content creators use Kapwing to add text around their videos, which results in higher engagement / views on social media sites like Facebook. Kapwing's creation process is powered by MoviePy! MoviePy is used to add the text, borders, and attribution directly to the uploaded videos.


.. raw:: html



Rinconcam
----------

`Rinconcam `_ is a camera which films surfers on the Californian beach of Point Rincon. At the end of each day it cuts together a video, puts it online, and tweets it. Everything is entirely automated with Python.
MoviePy is used to add transitions, titles and music to the videos.


.. raw:: html



Videogrep
~~~~~~~~~~

Videogrep is a python script written by Sam Lavigne, that goes through the subtitle tracks of movies and makes supercuts based on what it finds. For instance, here is an automatic supercut of every time the White House press secretary tells us what he can tell us:

.. raw:: html
+ +
+ +Here are `Videogrep's introductory blog post +`_ and the Github `Videogrep page `_. + +If you liked it, also have a look at these Videogrep-inspired projects: + +This `Videogrep blog post `_ attempts to cut a video precisely at the beginning and end of sentences or words: :: + + words = ["Americans", "must", "develop", "open ", "source", + " software", "for the", " rest ", "of the world", + "instead of", " soldiers"] + numbers = [3,0,4,3,4,0,1,2,0,1,0] # take clip number 'n' + + cuts = [find_word(word)[n] for (word,n) in zip(words, numbers)] + assemble_cuts(cuts, "fake_speech.mp4") + +.. raw:: html + +
+ +


This `other post `_ uses MoviePy to automatically cut together `all the highlights of a soccer game `_, based on the fact that the crowd cheers louder when something interesting happens. All in under 30 lines of Python:

diff --git a/v1.0.3/_sources/getting_started/audioclips.rst.txt b/v1.0.3/_sources/getting_started/audioclips.rst.txt
new file mode 100644
index 000000000..df17e148b

.. _audioclips:

Audio in MoviePy
-----------------

This section shows how to use MoviePy to create and edit audio clips.

Note that when you cut, mix or concatenate video clips in MoviePy the audio is automatically handled, so you don't need to worry about it. This section is of interest if you just want to edit audio files or you want custom audio clips for your videos.

What audioclips are made of
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

AudioClips are very similar to video clips in moviepy: they have a length, can be cut and composed the same way, etc. A notable difference is that ``audioclip.get_frame(t)`` returns the value(s) of the soundtrack at time ``t``, not a picture.

Creating a new audio clip
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Audio clips can be created from an audio file or from the soundtrack of a video file ::

    from moviepy.editor import *
    audioclip = AudioFileClip("some_audiofile.mp3")
    audioclip = AudioFileClip("some_video.avi")

for more, see :py:class:`~moviepy.audio.io.AudioFileClip.AudioFileClip`.

Alternatively you can get the audio track of an already created video clip ::

    videoclip = VideoFileClip("some_video.avi")
    audioclip = videoclip.audio

Compositing audio clips
~~~~~~~~~~~~~~~~~~~~~~~~

Exporting and previewing audio clips
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can also assign an audio clip as the soundtrack of a video clip with ::

    videoclip2 = videoclip.set_audio(my_audioclip)

diff --git a/v1.0.3/_sources/getting_started/compositing.rst.txt b/v1.0.3/_sources/getting_started/compositing.rst.txt
new file mode 100644
index 000000000..082b576bb

.. _CompositeVideoClips:

Mixing clips
=============

Video composition, also known as non-linear editing, means playing several clips together in a new clip. This video is a good example of what compositing you can do with MoviePy:

.. raw:: html
+ +


Before starting, note that video clips generally carry an audio track and a mask, which are also clips. When you compose these clips together, the soundtrack and mask of the final clip are automatically generated by putting together the soundtracks and masks of the clips. So most of the time you don't need to worry about mixing the audio and masks.

Stacking and concatenating clips
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Two simple ways of putting clips together are to concatenate them (to play them one after the other in a single long clip) or to stack them (to play them side by side in a single larger clip).

Concatenation is done with the function ``concatenate_videoclips``: ::

    from moviepy.editor import VideoFileClip, concatenate_videoclips
    clip1 = VideoFileClip("myvideo.mp4")
    clip2 = VideoFileClip("myvideo2.mp4").subclip(50,60)
    clip3 = VideoFileClip("myvideo3.mp4")
    final_clip = concatenate_videoclips([clip1,clip2,clip3])
    final_clip.write_videofile("my_concatenation.mp4")


The ``final_clip`` is a clip that plays the clips 1, 2, and 3 one after the other. Note that the clips do not need to be the same size. If they aren't, they will all appear centered in a clip large enough to contain the biggest of them, optionally with a color of your choosing to fill the borders. You have many other options there (see the doc of the function). You can for instance play a transition clip between the clips with the option ``transition=my_clip``.

Stacking is done with ``clips_array``: ::

    from moviepy.editor import VideoFileClip, clips_array, vfx
    clip1 = VideoFileClip("myvideo.mp4").margin(10) # add 10px contour
    clip2 = clip1.fx( vfx.mirror_x)
    clip3 = clip1.fx( vfx.mirror_y)
    clip4 = clip1.resize(0.60) # downsize 60%
    final_clip = clips_array([[clip1, clip2],
                              [clip3, clip4]])
    final_clip.resize(width=480).write_videofile("my_stack.mp4")

You obtain a clip which looks like this:

.. figure:: stacked.jpeg
    :align: center


CompositeVideoClips
~~~~~~~~~~~~~~~~~~~~~

The `CompositeVideoClip` class provides a very flexible way to compose clips, but is more complex than ``concatenate_videoclips`` and ``clips_array`` ::

    video = CompositeVideoClip([clip1,clip2,clip3])

Now ``video`` plays ``clip1``, and ``clip2`` *on top of* ``clip1``, and ``clip3`` on top of ``clip1`` and ``clip2``. For instance, if ``clip2`` and ``clip3`` have the same size as ``clip1``, then only ``clip3``, which is on top, will be visible in the video... unless ``clip3`` and ``clip2`` have masks which hide parts of them. Note that by default the composition has the size of its first clip (as it is generally a *background*). But sometimes you will want to make your clips *float* in a bigger composition, so you will specify the size of the final composition as follows ::

    video = CompositeVideoClip([clip1,clip2,clip3], size=(720,460))

Starting and stopping times
""""""""""""""""""""""""""""

In a CompositeVideoClip, all the clips start to play at a time that is specified by the ``clip.start`` attribute. You can set this starting time as follows: ::

    clip1 = clip1.set_start(5) # start after 5 seconds

So for instance your composition will look like ::

    video = CompositeVideoClip([clip1, # starts at t=0
                                clip2.set_start(5), # start at t=5s
                                clip3.set_start(9)]) # start at t=9s

In the example above, maybe ``clip2`` will start before ``clip1`` is over.
In this case you can make ``clip2`` appear with a *fade-in* effect of one second: ::

    video = CompositeVideoClip([clip1, # starts at t=0
                                clip2.set_start(5).crossfadein(1),
                                clip3.set_start(9).crossfadein(1.5)])

Positioning clips
""""""""""""""""""

If ``clip2`` and ``clip3`` are smaller than ``clip1``, you can decide where they will appear in the composition by setting their position. Here we indicate the coordinates of the top-left pixel of the clips: ::

    video = CompositeVideoClip([clip1,
                                clip2.set_position((45,150)),
                                clip3.set_position((90,100))])

There are many ways to specify the position: ::

    clip2.set_position((45,150)) # x=45, y=150 , in pixels

    clip2.set_position("center") # automatically centered

    # clip2 is horizontally centered, and at the top of the picture
    clip2.set_position(("center","top"))

    # clip2 is vertically centered, at the left of the picture
    clip2.set_position(("left","center"))

    # clip2 is at 40% of the width, 70% of the height of the screen:
    clip2.set_position((0.4,0.7), relative=True)

    # clip2's position is horizontally centered, and moving down !
    clip2.set_position(lambda t: ('center', 50+t) )

When indicating the position keep in mind that the ``y`` coordinate has its zero at the top of the picture:

.. figure:: videoWH.jpeg

.. Transitions
.. ------------

.. Everyone loves transitions between clips: fade-ins, fade-out, clips that slide in front of the previous one... everything is good to impress your grandparents.

.. In MoviePy, transitions are effects (see :ref:`effects`_) from the module moviepy.video.compositing.


Compositing audio clips
-------------------------

When you mix video clips together, MoviePy will automatically compose their respective audio tracks to form the audio track of the final clip, so you don't need to worry about compositing these tracks yourself.

If you want to make a custom audiotrack from several audio sources, audio clips can be mixed together with ``CompositeAudioClip`` and ``concatenate_audioclips``: ::

    from moviepy.editor import *
    # ... make some audio clips aclip1, aclip2, aclip3
    concat = concatenate_audioclips([aclip1, aclip2, aclip3])
    compo = CompositeAudioClip([aclip1.volumex(1.2),
                                aclip2.set_start(5), # start at t=5s
                                aclip3.set_start(9)])

diff --git a/v1.0.3/_sources/getting_started/effects.rst.txt b/v1.0.3/_sources/getting_started/effects.rst.txt
new file mode 100644
index 000000000..923c3a15f

.. _effects:

Clips transformations and effects
===================================

There are several categories of clip modifications in MoviePy:

- The very common methods to change the attributes of a clip: ``clip.set_duration``, ``clip.set_audio``, ``clip.set_mask``, ``clip.set_start`` etc.
- The already-implemented effects. Core effects like ``clip.subclip(t1, t2)`` (keep only the cut between t1 and t2), which are very important, are implemented as class methods. More advanced and less common effects like ``loop`` (makes the clip play in a loop) or ``time_mirror`` (makes the clip play backwards) are placed in the special modules ``moviepy.video.fx`` and ``moviepy.audio.fx`` and are applied with the ``clip.fx`` method, for instance ``clip.fx(time_mirror)`` (makes the clip play backwards), ``clip.fx(black_white)`` (turns the clip black and white), etc.
- The effects that you can create yourself, using the filter methods described at the end of this section.

All these effects have in common that they are **not inplace**: they do NOT modify the original clip, instead they create a new clip that is a version of the former with the changes applied. For instance: ::

    my_clip = VideoFileClip("some_file.mp4")
    my_clip.set_start(t=5) # does nothing, changes are lost
    my_new_clip = my_clip.set_start(t=5) # good !

Also, when you write ``clip.resize(width=640)``, it does not immediately apply the effect to all the frames of the clip, but only to the first frame: all the other frames will be resized only when required (that is, when you write the whole clip to a file or when you preview it). In other words, creating a new clip is neither time nor memory hungry; all the computations happen during the final rendering.

Time representations in MoviePy
---------------------------------

Many methods that we will see accept times as arguments. For instance ``clip.subclip(t_start,t_end)`` which cuts the clip between two times. For these methods, times can be represented either in seconds (``t_start=230.54``), as a couple (minutes, seconds) (``t_start=(3,50.54)``), as a triplet (hour, min, sec) (``t_start=(0,3,50.54)``) or as a string (``t_start='00:03:50.54'``).

Most of the time when the times are not provided they are guessed, for instance in ``clip.subclip(t_start=50)`` it is implied that ``t_end`` corresponds to the end of the clip, in ``clip.subclip(t_end=20)`` it is implied that ``t_start=0``. If the time is negative it is considered as the time before the end of the clip: ``clip.subclip(-20, -10)`` cuts the clip between 20s before the end and 10s before the end.


Methods to change the clip attributes
---------------------------------------

clip.fx
----------

Suppose that you have some functions implementing effects on clips, i.e. functions which, given a clip and some arguments, return a new clip: ::

    effect_1(clip, args1) -> new clip
    effect_2(clip, args2) -> new clip
    effect_3(clip, args3) -> new clip

where ``args`` represent arguments and/or keyword arguments. To apply these functions, in that order, to one clip, you would write something like ::

    newclip = effect_3( effect_2( effect_1(clip, args1), args2), args3)

but this is not easy to read. To have a clearer syntax you can use ``clip.fx``: ::

    newclip = (clip.fx( effect_1, args1)
                   .fx( effect_2, args2)
                   .fx( effect_3, args3))

Much better! There are already many effects implemented in the modules ``moviepy.video.fx`` and ``moviepy.audio.fx``. The fx methods in these modules are automatically applied to the sound and the mask of the clip if it is relevant, so that you don't have to worry about modifying these. For practicality, when you use ``from moviepy.editor import *``, these two modules are loaded as ``vfx`` and ``afx``, so you may write something like ::

    from moviepy.editor import *
    clip = (VideoFileClip("myvideo.avi")
            .fx( vfx.resize, width=460) # resize (keep aspect ratio)
            .fx( vfx.speedx, 2) # double the speed
            .fx( vfx.colorx, 0.5)) # darken the picture

For convenience, when you use ``moviepy.editor``, frequently used methods such as ``resize`` can be called in a simpler way: ``clip.resize(...)`` instead of ``clip.fx( vfx.resize, ...)``


Methods to create custom effects
----------------------------------

clip.fl
""""""""


You can modify a clip as you want using custom *filters* with ``clip.fl_time``, ``clip.fl_image``, and more generally with ``clip.fl``.
+ + +Methods to change the clip attributes +--------------------------------------- + +clip.fx +---------- + +Suppose that you have some functions implementing effects on clips, i.e. functions which, given a clip and some arguments, return a new clip: :: + + effect_1(clip, args1) -> new clip + effect_2(clip, args2) -> new clip + effect_3(clip, args3) -> new clip + +where ``args`` represent arguments and/or keyword arguments. To apply these functions, in that order, to one clip, you would write something like :: + + newclip = effect_3( effect_2( effect_1(clip, args1), args2), args3) + +but this is not easy to read. To have a clearer syntax you can use ``clip.fx``: :: + + newclip = (clip.fx( effect_1, args1) + .fx( effect_2, args2) + .fx( effect_3, args3)) + +Much better! There are already many effects implemented in the modules ``moviepy.video.fx`` and ``moviepy.audio.fx``. The fx methods in these modules are automatically applied to the sound and the mask of the clip when relevant, so that you don't have to worry about modifying these. For practicality, when you use ``from moviepy.editor import *``, these two modules are loaded as ``vfx`` and ``afx``, so you may write something like :: + + from moviepy.editor import * + clip = (VideoFileClip("myvideo.avi") + .fx( vfx.resize, width=460) # resize (keep aspect ratio) + .fx( vfx.speedx, 2) # double the speed + .fx( vfx.colorx, 0.5)) # darken the picture + +For convenience, when you use ``moviepy.editor``, frequently used methods such as ``resize`` can be called in a simpler way: ``clip.resize(...)`` instead of ``clip.fx( vfx.resize, ...)``. + + +Methods to create custom effects +---------------------------------- + +clip.fl +"""""""" + + +You can modify a clip as you want using custom *filters* with ``clip.fl_time``, ``clip.fl_image``, and more generally with ``clip.fl``. + +You can change the timeline of the clip with ``clip.fl_time`` like this: :: + + from math import sin + modifiedClip1 = my_clip.fl_time(lambda t: 3*t) + modifiedClip2 = my_clip.fl_time(lambda t: 1+sin(t)) + +Now the clip ``modifiedClip1`` plays the same as ``my_clip``, only three times faster, while ``modifiedClip2`` will play ``my_clip`` by oscillating between the times t=0s and t=2s. Note that in the last case you have created a clip of infinite duration (which is not a problem for the moment). + +You can also modify the display of a clip with ``clip.fl_image``. The following takes a clip and inverts the green and blue channels of the frames: :: + + def invert_green_blue(image): + return image[:,:,[0,2,1]] + + modifiedClip = my_clip.fl_image( invert_green_blue ) + +Finally, you may want to process the clip by taking into account both the time and the frame picture. This is possible with the method ``clip.fl(filter)``. The filter must be a function which takes two arguments and returns a picture. The first argument is a ``get_frame`` method (i.e. a function ``g(t)`` which given a time returns the clip's frame at that time), and the second argument is the time. :: + + def scroll(get_frame, t): + """ + This function returns a 'region' of the current frame. + The position of this region depends on the time. + """ + frame = get_frame(t) + frame_region = frame[int(t):int(t)+360,:] + return frame_region + + modifiedClip = my_clip.fl( scroll ) + +This will scroll down the clip, with a constant height of 360 pixels. + +When programming a new effect, prefer ``fl_time`` and ``fl_image`` over ``fl`` whenever possible. The reason is that, when these effects are applied to ImageClips, MoviePy will recognize that these methods do not need to be applied to each frame, which will result in faster renderings. diff --git a/v1.0.3/_sources/getting_started/efficient_moviepy.rst.txt b/v1.0.3/_sources/getting_started/efficient_moviepy.rst.txt new file mode 100644 index 000000000..ea328c307 --- /dev/null +++ b/v1.0.3/_sources/getting_started/efficient_moviepy.rst.txt @@ -0,0 +1,137 @@ +.. _efficient: + +How to be efficient with MoviePy +================================ + +This section gathers tips and tricks to help you make the most of what is already known worldwide as *the MoviePy experience*. + +The best way to start with MoviePy is to use it with the IPython Notebook: it makes it easier to preview clips (as we will see in this section), has autocompletion, and can display the documentation for the different methods of the library. + +.. _should_i_use_moviepy_editor: + +Should I use ``moviepy.editor``? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Most examples in this documentation use the submodule ``moviepy.editor``, but this submodule is not adapted to all needs, so should *you* use it? Short answer: if you use MoviePy to edit videos *by hand*, use it, but if you use MoviePy inside a larger library, program, or web server, it is better to avoid it and just load the functions that you need. + +The module ``moviepy.editor`` can be loaded using one of the three following methods: :: + + + from moviepy.editor import * # imports everything, quick and dirty + import moviepy.editor as mpy # Clean. Then use mpy.VideoClip, etc.
+ from moviepy.editor import VideoFileClip # just import what you need + +With any of these lines, the ``moviepy.editor`` module will actually do a lot of work behind the curtain: it will fetch all the most common classes, functions and subpackages of MoviePy, initialize a PyGame session (if PyGame is installed) to be able to preview video clips, and implement some shortcuts, like adding the ``resize`` transformation to the clips. This way you can use ``clip.resize(width=240)`` instead of the longer ``clip.fx( resize, width=240)``. In short, ``moviepy.editor`` provides all you need to play around and edit your videos, but it will take time to load (circa one second). So if all you need is one or two features inside another library, it is better to import directly what you need, as follows: :: + + from moviepy.video.io.VideoFileClip import VideoFileClip + from moviepy.video.fx.resize import resize + +.. _previewing: + +When to close() a clip +~~~~~~~~~~~~~~~~~~~~~~ + +When you create some types of clip instances - e.g. ``VideoFileClip`` or ``AudioFileClip`` - MoviePy creates a subprocess and locks the file. In order to release those resources when you are finished you should call the ``close()`` method. + +This is more important for more complex applications and is particularly important when running on Windows. While Python's garbage collector should eventually clean up the resources for you, closing them makes them available earlier. + +However, if you close a clip too early, methods on the clip (and any clips derived from it) become unsafe. + +So, the rules of thumb are: + + * Call ``close()`` on any clip that you **construct** once you have finished using it, and have also finished using any clip that was derived from it. + * Also close any clips you create through ``AudioFileClip.coreader()``. + * Even if you close a ``CompositeVideoClip`` instance, you still need to close the clips it was created from. + * Otherwise, if you have a clip that was created by deriving it from another clip (e.g. by calling ``set_mask()``), then generally you shouldn't close it. Closing the original clip will also close the copy. + +Clips act as `context managers `_. This means you can use them with a ``with`` statement, and they will automatically be closed at the end of the block, even if there is an exception. :: + + with AudioFileClip("song.wav") as clip: + raise NotImplementedError("I will work out how to process this song later") + # clip.close() is implicitly called, so the lock on song.wav file is immediately released. + + +The many ways of previewing a clip +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +When you are editing a video or trying to achieve an effect with MoviePy through a trial-and-error process, generating the video at each trial can be very long. This section presents a few tricks to go faster. + + +clip.save_frame +""""""""""""""""" + +Most of the time, just having one frame of the video can tell you if you are doing the right thing. You can save just one frame of the clip to a file as follows: :: + + my_clip.save_frame("frame.jpeg") # saves the first frame + my_clip.save_frame("frame.png", t=2) # saves the frame at t=2s + +.. _clip_preview: + +clip.show and clip.preview +"""""""""""""""""""""""""""" + +The methods ``clip.show`` and ``clip.preview`` enable you to visualize the clip in a PyGame window. They are the fastest way to preview, as the clips are generated and displayed at the same time, and they can be useful to get the coordinates or colors of pixels.
These methods require PyGame to be installed, and the ``moviepy.editor`` module to be used. + +The method ``clip.show`` enables you to preview one frame of a clip without having to write it to a file: the following lines display the frame in a PyGame window :: + + my_clip.show() # shows the first frame of the clip + my_clip.show(10.5) # shows the frame of the clip at t=10.5s + my_clip.show(10.5, interactive=True) + +The last line (with ``interactive=True``) displays the frame in an interactive way: if you click somewhere in the frame, it will print the position and color of the pixel. Press Escape to exit when you are done. + +A clip can be previewed as follows :: + + my_clip.preview() # preview with default fps=15 + my_clip.preview(fps=25) + my_clip.preview(fps=15, audio=False) # don't generate/play the audio. + my_audio_clip.preview(fps=22000) + +If you click somewhere in the frames of a video clip being previewed, it will print the position and color of the pixel clicked. Press Escape to abort the previewing. + +Note that if the clip is complex and your computer is not fast enough, the preview will appear slowed down compared to the real speed of the clip. In this case you can try to lower the frame rate (for instance to 10) or reduce the size of the clip with ``clip.resize``; it helps. + +.. _ipython_display: + +ipython_display +"""""""""""""""" + +Displaying the clips in an IPython Notebook can be very practical, especially if you don't want to use ``clip.show()`` and ``clip.preview()``. Here is what it will look like: + +.. image:: ../demo_preview.jpeg + :width: 500px + :align: center + +With ``ipython_display`` you can embed videos, images and sounds, either from a file or directly from a clip: :: + + ipython_display(my_video_clip) # embeds a video + ipython_display(my_imageclip) # embeds an image + ipython_display(my_audio_clip) # embeds a sound + + ipython_display("my_picture.jpeg") # embeds an image + ipython_display("my_video.mp4") # embeds a video + ipython_display("my_sound.mp3") # embeds a sound + +This will only work if ``ipython_display`` is on the last line of the notebook cell. You can also call ``ipython_display`` as a clip method: :: + + my_video_clip.ipython_display() + +If the rendering of your clip requires a frame rate, you can specify ``fps=25`` in ``ipython_display``. + +If you only need to display a snapshot of a video clip at some time `t` you can write :: + + my_video_clip.ipython_display(t=15) # will display a snapshot at t=15s + +You can also provide any valid HTML5 option as a keyword argument. For instance, if the clip is too big, you can write :: + + ipython_display(my_clip, width=400) # HTML5 will resize to 400 pixels + +For instance, when you are editing an animated GIF and want to check that it loops well, you can ask the video to start automatically and to loop (i.e. replay indefinitely): :: + + ipython_display(my_clip, autoplay=1, loop=1) + +Importantly, ``ipython_display`` actually embeds the clips physically in your notebook. The advantage is that you can move the notebook or put it online and the videos will work. The drawback is that the file size of the notebook can become very large. Depending on your browser, re-computing and displaying a video many times can take up space in the cache and the RAM (this will only be a problem for intensive use). Restarting your browser solves the problem.
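+ +Putting these tricks together, a typical trial-and-error session might look like this (a sketch; ``myvideo.mp4`` is a hypothetical file name): :: + + from moviepy.editor import * + + clip = VideoFileClip("myvideo.mp4").subclip(10, 20) + clip.save_frame("check.png", t=3) # quick sanity check on a single frame + clip.resize(width=320).preview(fps=10) # faster preview at reduced size and fps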
\ No newline at end of file diff --git a/v1.0.3/_sources/getting_started/getting_started.rst.txt b/v1.0.3/_sources/getting_started/getting_started.rst.txt new file mode 100644 index 000000000..39ad3f02c --- /dev/null +++ b/v1.0.3/_sources/getting_started/getting_started.rst.txt @@ -0,0 +1,20 @@ +.. _getting_started: + + +Getting started with MoviePy +------------------------------ + + +These pages explain everything you need to start editing with MoviePy. To go further, have a look at the :ref:`gallery` and the :ref:`examples`. + + +.. toctree:: + :maxdepth: 1 + + quick_presentation + compositing + effects + efficient_moviepy + working_with_matplotlib + audioclips + videoclips diff --git a/v1.0.3/_sources/getting_started/quick_presentation.rst.txt b/v1.0.3/_sources/getting_started/quick_presentation.rst.txt new file mode 100644 index 000000000..54884da45 --- /dev/null +++ b/v1.0.3/_sources/getting_started/quick_presentation.rst.txt @@ -0,0 +1,90 @@ +.. _quick_presentation: + +Quick presentation +=================== + +This section explains when MoviePy can be used and how it works. + +Do I need MoviePy? +~~~~~~~~~~~~~~~~~~~ + +Here are a few reasons why you may want to edit videos in Python: + +- You have many videos to process or to compose in a complicated way. +- You want to automate the creation of videos or GIFs on a web server (Django, Flask, etc.) +- You want to automate tedious tasks, like title insertions, tracking objects, cutting scenes, making end credits, subtitles, etc. +- You want to code your own video effects to do something no existing video editor can. +- You want to create animations from images generated by another Python library (Matplotlib, Mayavi, Gizeh, scikit-image...) + +And here are a few uses for which MoviePy is NOT the best solution: + +- You only need to do frame-by-frame video analysis (with face detection or other fancy stuff). This could be done with MoviePy in association with other libraries, but really, just use imageio_, OpenCV_ or SimpleCV, which are libraries that specialize in these tasks. +- You only want to convert a video file, or turn a series of image files into a movie. In this case it is better to directly call ``ffmpeg`` (or ``avconv`` or ``mencoder``...); it will be faster and more memory-efficient than going through MoviePy. + + +Advantages and limitations +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +MoviePy has been developed with the following goals in mind: + +- **Simple and intuitive**. Basic operations can be done in one line. The code is easy to learn and easy to understand for newcomers. +- **Flexible**. You have total control over the frames of the video and audio, and creating your own effects is easy as Py. +- **Portable**. The code uses very common software (Numpy and FFMPEG) and can run on (almost) any machine with (almost) any version of Python. + +For the limitations: MoviePy cannot (yet) stream videos (read from a webcam, or render a video live on a distant machine), and is not really designed for video processing involving many successive frames of a movie (like video stabilization; you'll need other software for that). You can also have memory problems if you use many video, audio, and image sources at the same time (>100), but this will be fixed in future versions. + +Example code +~~~~~~~~~~~~~~ + +In a typical MoviePy script, you load video or audio files, modify them, put them together, and write the final result to a new video file.
As an example, let us load a video of my last holidays, lower the volume, add a title in the center of the video for the first ten seconds, and write the result in a file: :: + + # Import everything needed to edit video clips + from moviepy.editor import * + + # Load myHolidays.mp4 and select the subclip 00:00:50 - 00:00:60 + clip = VideoFileClip("myHolidays.mp4").subclip(50,60) + + # Reduce the audio volume (volume x 0.8) + clip = clip.volumex(0.8) + + # Generate a text clip. You can customize the font, color, etc. + txt_clip = TextClip("My Holidays 2013",fontsize=70,color='white') + + # Say that you want it to appear 10s at the center of the screen + txt_clip = txt_clip.set_pos('center').set_duration(10) + + # Overlay the text clip on the first video clip + video = CompositeVideoClip([clip, txt_clip]) + + # Write the result to a file (many options available!) + video.write_videofile("myHolidays_edited.webm") + + +How MoviePy works +~~~~~~~~~~~~~~~~~~~ + +MoviePy uses the software ``ffmpeg`` to read and to export video and audio files. It also (optionally) uses ImageMagick to generate texts and write GIF files. The processing of the different media is ensured by Python's fast numerical library Numpy. Advanced effects and enhancements use some of Python's numerous image processing libraries (PIL, Scikit-image, scipy, etc.). + +.. image:: explanations.jpeg + :width: 570px + :align: center + +Basic concepts +~~~~~~~~~~~~~~~ + +The central objects of MoviePy are *clips*, which can be ``AudioClips`` or ``VideoClips``. They can be modified (cut, slowed down, darkened...) or mixed with other clips to form new clips; they can be previewed (using either PyGame or the IPython Notebook) and rendered to a file (as an MP4, a GIF, an MP3, etc.). ``VideoClips`` for instance can be created from a video file, an image, a text, or a custom animation. They can have an audio track (which is an ``AudioClip``) and a mask (a special ``VideoClip`` indicating which parts of the clip to hide when the clip is mixed with other clips). See :ref:`videoclips` and :ref:`CompositeVideoClips` for more details. + +A clip can be modified using one of moviepy's numerous effects (like in ``clip.resize(width=360)``, ``clip.subclip(t1,t2)``, or ``clip.fx(vfx.blackwhite)``) or using a user-implemented effect. MoviePy implements many functions (like ``clip.fl``, ``clip.fx``, etc.) which make it very easy to code your own effect in a few lines. See :ref:`effects` for more. + +You will also find a few advanced goodies in ``moviepy.video.tools`` to track objects in a video, draw simple shapes and color gradients (very useful for masks), generate subtitles and end credits, etc. See :ref:`advancedtools` for a description of these. + +Finally, although MoviePy has no graphical user interface, there are many ways to preview a clip which allow you to fine-tune your scripts and be sure that everything is perfect when you render your video in high quality. See :ref:`efficient`. + +.. _imageio: https://imageio.github.io/ +.. _OpenCV: http://opencv.org/ + + + + + + diff --git a/v1.0.3/_sources/getting_started/videoclips.rst.txt b/v1.0.3/_sources/getting_started/videoclips.rst.txt new file mode 100644 index 000000000..817f74fd4 --- /dev/null +++ b/v1.0.3/_sources/getting_started/videoclips.rst.txt @@ -0,0 +1,200 @@ +.. _videoclips: + +Creating and exporting video clips +=================================== + +Video and audio clips are the central objects of MoviePy.
In this section we present the different sorts of clips, how to create them, and how to write them to a file. For information on modifying a clip (cuts, effects, etc.), see :ref:`effects`. For how to put clips together, see :ref:`CompositeVideoClips`, and to see how to preview clips before writing a file, refer to :ref:`efficient`. + +The following code summarizes the base clips that you can create with moviepy: :: + + # VIDEO CLIPS + clip = VideoClip(make_frame, duration=4) # for custom animations (see below) + clip = VideoFileClip("my_video_file.mp4") # or .avi, .webm, .gif ... + clip = ImageSequenceClip(['image_file1.jpeg', ...], fps=24) + clip = ImageClip("my_picture.png") # or .jpeg, .tiff, ... + clip = TextClip("Hello !", font="Amiri-Bold", fontsize=70, color="black") + clip = ColorClip(size=(460,380), color=[R,G,B]) + + # AUDIO CLIPS + clip = AudioFileClip("my_audiofile.mp3") # or .ogg, .wav... or a video! + clip = AudioArrayClip(numpy_array, fps=44100) # from a numerical array + clip = AudioClip(make_frame, duration=3) # uses a function make_frame(t) + + + +The best way to understand these clips is to read the full documentation for each in the :ref:`reference_manual`. The next sections explain how to create clips (for instance from video or audio files), how to mix them together, and how to write them to a file. + + + +Categories of video clips +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Video clips are the building blocks of longer videos. Technically, they are clips with a ``clip.get_frame(t)`` method which outputs a HxWx3 numpy array representing the frame of the clip at time *t*. There are two main categories: animated clips (made with ``VideoFileClip`` and ``VideoClip``) and unanimated clips which show the same picture for an a-priori infinite duration (``ImageClip``, ``TextClip``, ``ColorClip``). There are also special video clips called masks, which belong to the categories above but output greyscale frames indicating which parts of another clip are visible or not. A video clip can carry around an audio clip (``clip.audio``) which is its *soundtrack*, and a mask clip. + +VideoClip +"""""""""" + +``VideoClip`` is the base class for all the other video clips in MoviePy. If all you want is to edit video files, you will never need it. This class is practical when you want to make animations from frames that are generated by another library. All you need is to define a function ``make_frame(t)`` which returns a HxWx3 numpy array (of 8-bit integers) representing the frame at time t. Here is an example with the graphics library Gizeh: :: + + import gizeh + import moviepy.editor as mpy + + W = H = 128 # width and height of the frames, in pixels + + def make_frame(t): + surface = gizeh.Surface(W, H) + radius = W*(1+ (t*(2-t))**2 )/6 # the radius varies over time + circle = gizeh.circle(radius, xy=(W/2, H/2), fill=(1,0,0)) + circle.draw(surface) + return surface.get_npimage() # returns an 8-bit RGB array + + clip = mpy.VideoClip(make_frame, duration=2) # 2 seconds + clip.write_gif("circle.gif", fps=15) + +.. image:: circle.gif + :width: 128 px + :align: center + +Note that clips made with a ``make_frame`` do not have an explicit frame rate, so you must provide a frame rate (``fps``, frames per second) for ``write_gif`` and ``write_videofile``, and more generally for any method that requires iterating through the frames. + +VideoFileClip +""""""""""""""" + +A VideoFileClip is a clip read from a video file (most formats are supported) or a GIF file.
You load the video as follows: :: + + myclip = VideoFileClip("some_video.avi") + myclip = VideoFileClip("some_animation.gif") + +Note that these clips will have an ``fps`` (frames per second) attribute, which will be transmitted when you make small modifications of the clip, and will be used by default in ``write_videofile``, ``write_gif``, etc. For instance: :: + + myclip = VideoFileClip("some_video.avi") + print(myclip.fps) # prints for instance '30' + # Now cut the clip between t=10 and 25 secs. This conserves the fps. + myclip2 = myclip.subclip(10, 25) + myclip2.write_gif("test.gif") # the gif will have 30 fps + + +For more, see :py:class:`~moviepy.video.io.VideoFileClip.VideoFileClip`. + +ImageSequenceClip +"""""""""""""""""" + +This is a clip made from a series of images; you call it with :: + + clip = ImageSequenceClip(images_list, fps=25) + +where ``images_list`` can be either a list of image names (that will be *played* in that order), a folder name (in which case all the image files in the folder will be played in alphanumerical order), or a list of frames (Numpy arrays), obtained for instance from other clips. + +When you provide a folder name or list of file names, you can choose ``load_images=True`` to specify that all images should be loaded into RAM. This is only interesting if you have a small number of images that will each be used more than once (e.g. if the images form a looping animation). + +ImageClip +"""""""""" + +An ImageClip is a video clip that always displays the same image. You can create one as follows: :: + + myclip = ImageClip("some_picture.jpeg") + myclip = ImageClip(some_array) # a (height x width x 3) RGB numpy array + myclip = some_video_clip.to_ImageClip(t='01:00:00') # frame at t=1 hour. + +For more, see :py:class:`~moviepy.video.VideoClip.ImageClip`. + +Two special kinds of ImageClip, shown below, are the TextClip and the ColorClip. + +TextClip +""""""""""""""" + +Generating a TextClip requires ImageMagick to be installed and (for Windows users) linked to MoviePy; see the installation instructions. + +Here is how you make a textclip (you won't need all these options all the time): :: + + myclip = TextClip("Hello", font='Amiri-Bold') + + +The font can be any font installed on your computer, but ImageMagick will have specific names for it. For instance the *normal* Amiri font will be called ``Amiri-Regular`` while the Impact font will be called ``Impact-Normal``. To get a list of the possible fonts, type ``TextClip.list('font')``. To find all the font names related to a given font, use for instance :: + + TextClip.search('Amiri', 'font') # Returns all font names containing Amiri + +Note also that the use of a stroke (or contour) will not work well on small letters, so if you need a small text with a contour, it is better to generate a big text, then downsize it: :: + + myclip = TextClip("Hello", fontsize=70, stroke_width=5).resize(height=15) + + +TextClips have many, many options: alignment, kerning (distance between the letters), stroke size, background, word wrapping, etc. See :py:class:`~moviepy.video.VideoClip.TextClip` for more. + + +Mask clips +~~~~~~~~~~~~~~ + +A mask is a special video clip which indicates which pixels will be visible when a video clip carrying this mask is composed with other video clips (see :ref:`CompositeVideoClips`). Masks are also used to define transparency when you export the clip as a GIF file or as a PNG.
+ +The fundamental difference between masks and standard clips is that standard clips output frames with 3 components (R-G-B) per pixel, with values between 0 and 255, while a mask has just one component per pixel, between 0 and 1 (1 indicating a fully visible pixel and 0 a transparent pixel). In other words, a mask is always in greyscale. + +When you create or load a clip that you will use as a mask you need to declare it: :: + + maskclip = VideoClip(makeframe, duration=4, ismask=True) + maskclip = ImageClip("my_mask.jpeg", ismask=True) + maskclip = VideoFileClip("myvideo.mp4", ismask=True) + +In the case of video and image files, if these are not already black and white they will be converted automatically. + +Then you attach this mask to a clip (which must have the same dimensions) with ``myclip.set_mask(maskclip)``. + +Some image formats like PNG support transparency with an *alpha layer*, which MoviePy will use as a mask: :: + + myclip = ImageClip("image.png", transparent=True) # True is the default + myclip.mask # <- the alpha layer of the picture. + +Any video clip can be turned into a mask with ``clip.to_mask()``, and a mask can be turned into a standard RGB video clip with ``my_mask_clip.to_RGB()``. + + +Masks are treated differently by many methods (because their frames are different) but you can do with a mask pretty much everything you can do with a standard clip: you can cut it, edit it, preview it, write it to a video file, make snapshots, etc. + +.. _renderingAClip: + +Exporting video clips +~~~~~~~~~~~~~~~~~~~~~~~ + +Video files (.mp4, .webm, .ogv...) +"""""""""""""""""""""""""""""""""""" + +To write a clip as a video file, use :: + + my_clip.write_videofile("movie.mp4") # default codec: 'libx264', 24 fps + my_clip.write_videofile("movie.mp4", fps=15) + my_clip.write_videofile("movie.webm") # webm format + my_clip.write_videofile("movie.webm", audio=False) # don't render audio. + +MoviePy has default codec names for the most common file extensions. If you want to use exotic formats or if you are not happy with the defaults you can provide the codec with ``codec='mpeg4'`` for instance. There are many, many options when you are writing a video (bitrate, parameters of the audio writing, file size optimization, number of processors to use, etc.). Please refer to :py:meth:`~moviepy.video.VideoClip.VideoClip.write_videofile` for more. + + +Sometimes it is impossible for MoviePy to guess the ``duration`` attribute of the clip (keep in mind that some clips, like ImageClips displaying a picture, have *a priori* an infinite duration). Then, the ``duration`` must be set manually with ``clip.set_duration``: :: + + # Make a video showing a flower for 5 seconds + my_clip = ImageClip("flower.jpeg") # has infinite duration + my_clip.write_videofile("flower.mp4") # Will fail! NO DURATION! + my_clip.set_duration(5).write_videofile("flower.mp4") # works! + + +Animated GIFs +"""""""""""""" + +To write your video as an animated GIF, use :: + + my_clip.write_gif('test.gif', fps=12) + +Note that this requires ImageMagick to be installed. Otherwise you can also create the GIF with ffmpeg by adding the option ``program='ffmpeg'``; it will be much faster but won't look as nice and won't be optimized. + +There are many options to optimize the quality and size of a gif. Please refer to :py:meth:`~moviepy.video.VideoClip.VideoClip.write_gif`.
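+ +For instance, a short sketch combining the options mentioned above (the resize factor and file name are arbitrary): :: + + my_clip.resize(0.5).write_gif('test.gif', fps=12, program='ffmpeg')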
+ +Note that when editing GIFs the best way is to preview them in the notebook as explained here: :ref:`ipython_display` + +For examples of use, see `this blog post `_ for information on making GIFs from video files, and `this other post `_ for GIF animations with vector graphics. + +Export images +""""""""""""""" + +You can write a frame to an image file with :: + + myclip.save_frame("frame.png") # by default the first frame is extracted + myclip.save_frame("frame.jpeg", t='01:00:00') # frame at time t=1h + +If the clip has a mask it will be exported as the alpha layer of the image unless you specify ``withmask=False``. diff --git a/v1.0.3/_sources/getting_started/working_with_matplotlib.rst.txt b/v1.0.3/_sources/getting_started/working_with_matplotlib.rst.txt new file mode 100644 index 000000000..fbd17897c --- /dev/null +++ b/v1.0.3/_sources/getting_started/working_with_matplotlib.rst.txt @@ -0,0 +1,75 @@ + +Working with `matplotlib` +========================= + +Defining custom animations +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +MoviePy allows you to produce custom animations by defining a function that returns a frame at a given time of the animation in the form of a numpy array. + +An example of this workflow is below: :: + + from moviepy.editor import VideoClip + + def make_frame(t): + """Returns an image of the frame for time t.""" + # ... create the frame with any library here ... + return frame_for_time_t # (Height x Width x 3) Numpy array + + animation = VideoClip(make_frame, duration=3) # 3-second clip + +This animation can then be exported by the usual MoviePy means: :: + + # export as a video file + animation.write_videofile("my_animation.mp4", fps=24) + # export as a GIF + animation.write_gif("my_animation.gif", fps=24) # usually slower + +Simple `matplotlib` example +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +An example of an animation using `matplotlib` is as follows: :: + + import matplotlib.pyplot as plt + import numpy as np + from moviepy.editor import VideoClip + from moviepy.video.io.bindings import mplfig_to_npimage + + x = np.linspace(-2, 2, 200) + + duration = 2 + + fig, ax = plt.subplots() + def make_frame(t): + ax.clear() + ax.plot(x, np.sinc(x**2) + np.sin(x + 2*np.pi/duration * t), lw=3) + ax.set_ylim(-1.5, 2.5) + return mplfig_to_npimage(fig) + + animation = VideoClip(make_frame, duration=duration) + animation.write_gif('matplotlib.gif', fps=20) + + +Working in the Jupyter Notebook +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you are working inside a Jupyter Notebook, you can take advantage of the fact that VideoClips can be embedded in the output cells of the notebook with the `ipython_display` method. The above example then becomes: :: + + import matplotlib.pyplot as plt + import numpy as np + from moviepy.editor import VideoClip + from moviepy.video.io.bindings import mplfig_to_npimage + + x = np.linspace(-2, 2, 200) + + duration = 2 + + fig, ax = plt.subplots() + def make_frame(t): + ax.clear() + ax.plot(x, np.sinc(x**2) + np.sin(x + 2*np.pi/duration * t), lw=3) + ax.set_ylim(-1.5, 2.5) + return mplfig_to_npimage(fig) + + animation = VideoClip(make_frame, duration=duration) + animation.ipython_display(fps=20, loop=True, autoplay=True) \ No newline at end of file diff --git a/v1.0.3/_sources/index.rst.txt b/v1.0.3/_sources/index.rst.txt new file mode 100644 index 000000000..1e8d21234 --- /dev/null +++ b/v1.0.3/_sources/index.rst.txt @@ -0,0 +1,62 @@ + +.. image:: logo.png + :width: 50% + :align: center + +.. MoviePy +..
======= + + + +MoviePy is a Python module for video editing, which can be used for basic operations (like cuts, concatenations, title insertions), video compositing (a.k.a. non-linear editing), video processing, or to create advanced effects. It can read and write the most common video formats, including GIF. + +Here it is in action (run in an IPython Notebook): + +.. image:: demo_preview.jpeg + :width: 500px + :align: center + +User Guide +------------ + +.. toctree:: + :maxdepth: 1 + + install + getting_started/getting_started + gallery + examples/examples + docker + opencv_instructions + FAQ + advanced_tools/advanced_tools + ref/ref + +Contribute! +-------------- + +MoviePy is open source software originally written by Zulko_ and released under the MIT licence. It works on Windows, Mac, and Linux, with Python 2 or Python 3. The code is hosted on Github_, where you can push improvements, report bugs and ask for help. There is also a MoviePy forum on Reddit_ and a mailing list on librelist_. + +.. raw:: html + + + + + +.. +.. Fork me on GitHub + +.. _PyPI: https://pypi.python.org/pypi/moviepy +.. _Zulko: https://github.com/Zulko/ +.. _Stackoverflow: https://stackoverflow.com/ +.. _Github: https://github.com/Zulko/moviepy +.. _Reddit: https://www.reddit.com/r/moviepy/ +.. _librelist: mailto:moviepy@librelist.com diff --git a/v1.0.3/_sources/install.rst.txt b/v1.0.3/_sources/install.rst.txt new file mode 100644 index 000000000..ebd99be03 --- /dev/null +++ b/v1.0.3/_sources/install.rst.txt @@ -0,0 +1,70 @@ +.. _install: + +Download and Installation +========================== + + +Installation +-------------- + +**Method with pip:** if you have ``pip`` installed, just type this in a terminal (it will install ez_setup if you don't already have it) :: + + (sudo) pip install moviepy + +If you have neither ``setuptools`` nor ``ez_setup`` installed, the command above will fail; in this case type this before installing: :: + + (sudo) pip install ez_setup + +**Method by hand:** download the sources, either on PyPI_ or (if you want the development version) on Github_, unzip everything in one folder, open a terminal and type :: + + (sudo) python setup.py install + +MoviePy depends on the Python modules Numpy_, imageio_, Decorator_, and tqdm_, which will be automatically installed during MoviePy's installation. It should work on Windows/Mac/Linux, with Python 2.7+ and 3; if you have trouble installing MoviePy or one of its dependencies, please provide feedback! + +MoviePy depends on the software FFMPEG for video reading and writing. You don't need to worry about that, as FFMPEG should be automatically downloaded/installed by ImageIO during your first use of MoviePy (it takes a few seconds). If you want to use a specific version of FFMPEG, you can set the FFMPEG_BINARY environment variable. See ``moviepy/config_defaults.py`` for details. + + +Other optional but useful dependencies +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +ImageMagick_ is not strictly required; it is only needed if you want to write texts. It can also be used as a backend for GIFs, but you can do GIFs with MoviePy without ImageMagick. + +Once you have installed it, ImageMagick will be automatically detected by MoviePy, **except on Windows!** Windows users, before installing MoviePy by hand, go into the ``moviepy/config_defaults.py`` file and provide the path to the ImageMagick binary called `magick`.
It should look like this :: + + IMAGEMAGICK_BINARY = "C:\\Program Files\\ImageMagick_VERSION\\magick.exe" + +You can also set the IMAGEMAGICK_BINARY environment variable. See ``moviepy/config_defaults.py`` for details. + +If you are using an older version of ImageMagick, keep in mind the name of the executable is not ``magick.exe`` but ``convert.exe``. In that case, the IMAGEMAGICK_BINARY property should be ``C:\\Program Files\\ImageMagick_VERSION\\convert.exe`` + +PyGame_ is needed for video and sound previews (useless if you intend to work with MoviePy on a server, but really essential for advanced video editing *by hand*). + +For advanced image processing you will need one or several of these packages. For instance, using the method ``clip.resize`` requires that at least one of Scipy, PIL, Pillow or OpenCV is installed. + +- The Python Imaging Library (PIL) or, better, its branch Pillow_. +- Scipy_ (for tracking, segmenting, etc.); it can also be used for resizing video clips if PIL and OpenCV aren't installed on your computer. +- `Scikit Image`_ may be needed for some advanced image manipulation. +- `OpenCV 2.4.6`_ or more recent (provides the package ``cv2``) may be needed for some advanced image manipulation. + +If you are on Linux, these packages will likely be in your repos. + +.. _`Numpy`: https://www.scipy.org/install.html +.. _Decorator: https://pypi.python.org/pypi/decorator +.. _tqdm: https://pypi.python.org/pypi/tqdm + +.. _ffmpeg: https://www.ffmpeg.org/download.html + + +.. _imageMagick: https://www.imagemagick.org/script/index.php +.. _Pygame: https://www.pygame.org/download.shtml +.. _imageio: https://imageio.github.io/ + +.. _Pillow: https://pillow.readthedocs.org/en/latest/ +.. _Scipy: https://www.scipy.org/ +.. _`Scikit Image`: http://scikit-image.org/download.html + +.. _Github: https://github.com/Zulko/moviepy +.. _PyPI: https://pypi.python.org/pypi/moviepy +.. _`OpenCV 2.4.6`: https://sourceforge.net/projects/opencvlibrary/files/ + + diff --git a/v1.0.3/_sources/opencv_instructions.rst.txt b/v1.0.3/_sources/opencv_instructions.rst.txt new file mode 100644 index 000000000..de161762f --- /dev/null +++ b/v1.0.3/_sources/opencv_instructions.rst.txt @@ -0,0 +1,35 @@ +.. _opencv: + +So you want to install OpenCV 2.4.6? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +OpenCV is entirely optional; its installation is not always simple and I found it to be unstable, so be warned! The installation seems easy for Windows. On Linux, here is what I found on the Internet: + +- Remove any other version of OpenCV if you installed it through a package manager. +- Unzip the source code of `OpenCV 2.4.6` in some folder. Open a terminal in this folder. +- Make a new directory and go into this directory: :: + + mkdir release + cd release + +- Run ``cmake``. Here is the line I used: :: + + cmake -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=ON -D WITH_V4L=OFF -D INSTALL_C_EXAMPLES=ON -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON .. + +- Run ``make``. This may take a few minutes (15 minutes on my computer). :: + + make + +- Finally, install. :: + + sudo make install + +And voilà! + +You can check if it worked by opening a Python console and typing :: + + import cv2 + print(cv2.__version__) + +Advice: do not throw your ``release`` folder away. If later you have strange bugs with OpenCV involving ``.so`` files, just redo the ``sudo make install`` step.
diff --git a/v1.0.3/_sources/ref/AudioClip.rst.txt b/v1.0.3/_sources/ref/AudioClip.rst.txt new file mode 100644 index 000000000..56187c1f4 --- /dev/null +++ b/v1.0.3/_sources/ref/AudioClip.rst.txt @@ -0,0 +1,31 @@ +.. _ref_AudioClip: + +************ +AudioClip +************ + +:class:`AudioClip` +========================== + +.. autoclass:: moviepy.audio.AudioClip.AudioClip + :members: + :inherited-members: + :show-inheritance: + +:class:`AudioFileClip` +========================== + +.. autoclass:: moviepy.audio.io.AudioFileClip.AudioFileClip + :members: + :inherited-members: + :show-inheritance: + + +:class:`CompositeAudioClip` +================================ + +.. autoclass:: moviepy.audio.AudioClip.CompositeAudioClip + :members: + :inherited-members: + :show-inheritance: + diff --git a/v1.0.3/_sources/ref/Clip.rst.txt b/v1.0.3/_sources/ref/Clip.rst.txt new file mode 100644 index 000000000..bac02813c --- /dev/null +++ b/v1.0.3/_sources/ref/Clip.rst.txt @@ -0,0 +1,11 @@ +************ +Clip +************ + +:class:`Clip` +========================== + +.. autoclass:: moviepy.Clip.Clip + :members: + :inherited-members: + :show-inheritance: diff --git a/v1.0.3/_sources/ref/VideoClip/VideoClip.rst.txt b/v1.0.3/_sources/ref/VideoClip/VideoClip.rst.txt new file mode 100644 index 000000000..c68108d88 --- /dev/null +++ b/v1.0.3/_sources/ref/VideoClip/VideoClip.rst.txt @@ -0,0 +1,65 @@ +.. _ref_VideoClip: + +*********************** + Classes of Video Clips +*********************** + + + +:class:`VideoClip` +========================== + +.. autoclass:: moviepy.video.VideoClip.VideoClip + :members: + :inherited-members: + :show-inheritance: + + + +:class:`VideoFileClip` +------------------------ + +.. autoclass:: moviepy.video.io.VideoFileClip.VideoFileClip + :members: + :inherited-members: + :show-inheritance: + + + +:class:`ImageClip` +---------------------- + +.. autoclass:: moviepy.video.VideoClip.ImageClip + :members: + :inherited-members: + :show-inheritance: + + + +:class:`ColorClip` +------------------ + +.. autoclass:: moviepy.video.VideoClip.ColorClip + :members: + :inherited-members: + :show-inheritance: + + +:class:`TextClip` +--------------------- + +.. autoclass:: moviepy.video.VideoClip.TextClip + :members: + :inherited-members: + :show-inheritance: + + + +:class:`CompositeVideoClip` +------------------------------- + +.. autoclass:: moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip + :members: + :inherited-members: + :show-inheritance: + diff --git a/v1.0.3/_sources/ref/audiofx.rst.txt b/v1.0.3/_sources/ref/audiofx.rst.txt new file mode 100644 index 000000000..a9a214493 --- /dev/null +++ b/v1.0.3/_sources/ref/audiofx.rst.txt @@ -0,0 +1,46 @@ +.. _ref_audiofx: + +************ +audio.fx +************ + + +The module ``moviepy.audio.fx`` regroups functions meant to be used with ``audioclip.fx()``. Note that some of these functions such as ``volumex`` (which multiplies the volume) can be applied directly to a video clip, in which case they will affect the audio clip attached to this video clip. Read the docs of the different functions to know when this is the case. + +Because this module will be larger in the future, it allows two kinds of import.
+You can either import a single function like this: :: + + from moviepy.audio.fx.volumex import volumex + newaudio = audioclip.fx( volumex, 0.5) + +Or import everything: :: + + import moviepy.audio.fx.all as afx + newaudio = (audioclip.fx( afx.audio_normalize) + .fx( afx.volumex, 0.5) + .fx( afx.audio_fadein, 1.0) + .fx( afx.audio_fadeout, 1.0)) + + + +When you type :: + + from moviepy.editor import * + +the module ``audio.fx`` is loaded as ``afx`` and you can use ``afx.volumex``, etc. + + +.. currentmodule:: moviepy.audio.fx.all + +.. autosummary:: + :toctree: audiofx + :nosignatures: + + audio_fadein + audio_fadeout + audio_loop + audio_normalize + volumex diff --git a/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_fadein.rst.txt b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_fadein.rst.txt new file mode 100644 index 000000000..b31e5bd38 --- /dev/null +++ b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_fadein.rst.txt @@ -0,0 +1,6 @@ +moviepy.audio.fx.all.audio_fadein +================================= + +.. currentmodule:: moviepy.audio.fx.all + +.. autofunction:: audio_fadein \ No newline at end of file diff --git a/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_fadeout.rst.txt b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_fadeout.rst.txt new file mode 100644 index 000000000..2f8cc3616 --- /dev/null +++ b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_fadeout.rst.txt @@ -0,0 +1,6 @@ +moviepy.audio.fx.all.audio_fadeout +================================== + +.. currentmodule:: moviepy.audio.fx.all + +.. autofunction:: audio_fadeout \ No newline at end of file diff --git a/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_loop.rst.txt b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_loop.rst.txt new file mode 100644 index 000000000..04afbdd1c --- /dev/null +++ b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_loop.rst.txt @@ -0,0 +1,6 @@ +moviepy.audio.fx.all.audio_loop +=============================== + +.. currentmodule:: moviepy.audio.fx.all + +.. autofunction:: audio_loop \ No newline at end of file diff --git a/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_normalize.rst.txt b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_normalize.rst.txt new file mode 100644 index 000000000..a5cc3c771 --- /dev/null +++ b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.audio_normalize.rst.txt @@ -0,0 +1,6 @@ +moviepy.audio.fx.all.audio_normalize +==================================== + +.. currentmodule:: moviepy.audio.fx.all + +.. autofunction:: audio_normalize diff --git a/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.volumex.rst.txt b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.volumex.rst.txt new file mode 100644 index 000000000..3dfb113d9 --- /dev/null +++ b/v1.0.3/_sources/ref/audiofx/moviepy.audio.fx.all.volumex.rst.txt @@ -0,0 +1,6 @@ +moviepy.audio.fx.all.volumex +============================ + +.. currentmodule:: moviepy.audio.fx.all + +.. autofunction:: volumex \ No newline at end of file diff --git a/v1.0.3/_sources/ref/audiotools.rst.txt b/v1.0.3/_sources/ref/audiotools.rst.txt new file mode 100644 index 000000000..3309b674c --- /dev/null +++ b/v1.0.3/_sources/ref/audiotools.rst.txt @@ -0,0 +1,7 @@ +..
_ref_audiotools: + +************ +audio.tools +************ + +Currently empty diff --git a/v1.0.3/_sources/ref/code_origanization.rst.txt b/v1.0.3/_sources/ref/code_origanization.rst.txt new file mode 100644 index 000000000..ffdf189ab --- /dev/null +++ b/v1.0.3/_sources/ref/code_origanization.rst.txt @@ -0,0 +1,23 @@ +.. _codeorganization: + +Organization of MoviePy's code +=============================== + +This section reviews the folders and files in MoviePy's code. It's quite simple: + +At the root of the project you have everything required for the packaging and installation of moviepy (README, setup.py, LICENCE), etc. Then you have the ``docs/`` folder with the source code of the documentation, a folder for some :ref:`examples`, and the main folder ``moviepy/`` for the source code of the library itself. + +In the folder ``moviepy/``, the classes and modules relating to the video and the audio are clearly separated into two subfolders ``video/`` and ``audio/``. In ``moviepy/`` you will find all the classes, functions and decorators which are useful to both submodules ``audio`` and ``video``: + +- ``Clip.py`` defines the base object for ``AudioClip`` and ``VideoClip`` and the simple methods that can be used by both, like ``clip.subclip``, ``clip.set_duration``, etc. +- Files ``config.py`` and ``config_defaults.py`` store the default paths to the external programs FFMPEG and ImageMagick. +- ``decorators.py`` provides very useful decorators that automate some tasks, like the fact that some effects, when applied to a clip, should also be applied to its mask, or to its audio track. +- ``tools.py`` provides misc. functions that are useful everywhere in the library, like a standardized call to subprocess, a time converter, a standardized way to print messages in the console, etc. +- ``editor.py`` is a helper module to easily load and initiate many functionalities of moviepy (see :ref:`efficient` for more details) + +The submodules ``moviepy.audio`` and ``moviepy.video`` are organized approximately the same way: at their root they implement base classes (respectively ``AudioClip`` and ``VideoClip``) and they have the following submodules: + +- ``io`` contains everything required to read files, write files, preview the clip or use a graphical interface of any sort. It contains the objects that speak to FFMPEG and ImageMagick, the classes AudioFileClip and VideoFileClip, the functions used to preview a clip with pygame or to embed a video in HTML5 (for instance in the IPython Notebook). +- ``fx`` contains a collection of effects and filters (like turning a video black and white, correcting luminosity, zooming or creating a scrolling effect). To add an effect to MoviePy, you simply add a new file ``my_effect.py`` to this folder, and in the file you define the function ``my_effect(clip, *other_parameters)`` (see the sketch after this list). +- ``compositing`` contains functions and classes to compose videoclips (CompositeVideoClip, concatenate_videoclips, clips_array) +- ``tools`` contains advanced tools that are not effects but can help edit clips or generate new clips (tracking, subtitles, etc.)
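+ +For instance, a minimal sketch of such an effect file (``my_effect.py`` and its darkening ``factor`` parameter are hypothetical; ``fl_image`` is described in :ref:`effects`): :: + + # moviepy/video/fx/my_effect.py (hypothetical new effect file) + def my_effect(clip, factor=0.5): + """Darkens the clip by multiplying every frame by `factor`.""" + return clip.fl_image(lambda frame: (frame * factor).astype('uint8')) + +It could then be applied to a clip with ``clip.fx(my_effect, factor=0.3)``.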
\ No newline at end of file diff --git a/v1.0.3/_sources/ref/decorators.rst.txt b/v1.0.3/_sources/ref/decorators.rst.txt new file mode 100644 index 000000000..ac280866f --- /dev/null +++ b/v1.0.3/_sources/ref/decorators.rst.txt @@ -0,0 +1,11 @@ +.. _ref_decorators: + +************ +Decorators +************ +These decorators are implemented to ease the writing of methods and effects in MoviePy. + +.. automodule:: moviepy.decorators + :members: + :inherited-members: + :show-inheritance: diff --git a/v1.0.3/_sources/ref/ffmpeg.rst.txt b/v1.0.3/_sources/ref/ffmpeg.rst.txt new file mode 100644 index 000000000..1cd125a38 --- /dev/null +++ b/v1.0.3/_sources/ref/ffmpeg.rst.txt @@ -0,0 +1,10 @@ +.. _ffmpegTools: + +FFMPEG tools +---------------------------- + +.. automodule:: moviepy.video.io.ffmpeg_tools + :members: + :inherited-members: + :undoc-members: + :show-inheritance: diff --git a/v1.0.3/_sources/ref/ref.rst.txt b/v1.0.3/_sources/ref/ref.rst.txt new file mode 100644 index 000000000..c2541e3ce --- /dev/null +++ b/v1.0.3/_sources/ref/ref.rst.txt @@ -0,0 +1,23 @@ +.. _reference_manual: + + +Reference Manual +================ + +The documentation may be a little messy for the moment; it will get better with time. If you want to hack into the code or locate a particular function, read :ref:`codeorganization`. + + +.. toctree:: + :maxdepth: 3 + + Clip + VideoClip/VideoClip + AudioClip + videofx + audiofx + videotools + audiotools + ffmpeg + decorators + code_origanization diff --git a/v1.0.3/_sources/ref/videofx.rst.txt b/v1.0.3/_sources/ref/videofx.rst.txt new file mode 100644 index 000000000..2bdf10c90 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx.rst.txt @@ -0,0 +1,68 @@ +.. _refvideofx: + +*********************** +moviepy.video.fx (vfx) +*********************** +The module ``moviepy.video.fx`` regroups functions meant to be used with ``videoclip.fx()``. + +For all other modifications, we use ``clip.fx`` and ``clip.fl``. ``clip.fx`` is meant to make it easy to use already-written transformation functions, while ``clip.fl`` makes it easy to write new transformation functions. + +Because this module is starting to get large and will only get larger in the future, it allows two kinds of imports. You can either import a single function like this: :: + + from moviepy.video.fx.crop import crop + newclip = myclip.fx( crop, x1=15) + +Or import everything: :: + + import moviepy.video.fx.all as vfx + newclip = (myclip.fx( vfx.crop, x1=15) + .fx( vfx.resize, width=200) + .fx( vfx.fadeout, 1)) + + +When you type: :: + + from moviepy.editor import * + +the module ``video.fx`` is loaded as ``vfx`` and you can use ``vfx.colorx``, ``vfx.resize`` etc. + + +.. currentmodule:: moviepy.video.fx.all + +.. autosummary:: + :toctree: videofx + :nosignatures: + + accel_decel + blackwhite + blink + colorx + crop + even_size + fadein + fadeout + freeze + freeze_region + gamma_corr + headblur + invert_colors + loop + lum_contrast + make_loopable + margin + mask_and + mask_color + mask_or + mirror_x + mirror_y + painting + resize + rotate + scroll + speedx + supersample + time_mirror + time_symmetrize + + + diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.accel_decel.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.accel_decel.rst.txt new file mode 100644 index 000000000..4e499db34 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.accel_decel.rst.txt @@ -0,0 +1,6 @@ +moviepy\.video\.fx\.all\.accel\_decel +===================================== + +.. currentmodule:: moviepy.video.fx.all + +..
autofunction:: accel_decel \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.blackwhite.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.blackwhite.rst.txt new file mode 100644 index 000000000..536d9cd70 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.blackwhite.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.blackwhite +=============================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: blackwhite \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.blink.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.blink.rst.txt new file mode 100644 index 000000000..5b34427a2 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.blink.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.blink +========================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: blink \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.colorx.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.colorx.rst.txt new file mode 100644 index 000000000..8cd179623 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.colorx.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.colorx +=========================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: colorx \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.crop.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.crop.rst.txt new file mode 100644 index 000000000..ad26f98ed --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.crop.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.crop +========================= + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: crop \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.even_size.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.even_size.rst.txt new file mode 100644 index 000000000..364bff043 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.even_size.rst.txt @@ -0,0 +1,6 @@ +moviepy\.video\.fx\.all\.even\_size +=================================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: even_size \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.fadein.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.fadein.rst.txt new file mode 100644 index 000000000..f34b418b4 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.fadein.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.fadein +=========================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: fadein \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.fadeout.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.fadeout.rst.txt new file mode 100644 index 000000000..07806384f --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.fadeout.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.fadeout +============================ + +.. currentmodule:: moviepy.video.fx.all + +.. 
autofunction:: fadeout \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.freeze.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.freeze.rst.txt new file mode 100644 index 000000000..209f488eb --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.freeze.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.freeze +=========================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: freeze \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.freeze_region.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.freeze_region.rst.txt new file mode 100644 index 000000000..b4466e31a --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.freeze_region.rst.txt @@ -0,0 +1,6 @@ +moviepy\.video\.fx\.all\.freeze\_region +======================================= + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: freeze_region \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.gamma_corr.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.gamma_corr.rst.txt new file mode 100644 index 000000000..afb22d4c9 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.gamma_corr.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.gamma_corr +=============================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: gamma_corr \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.headblur.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.headblur.rst.txt new file mode 100644 index 000000000..3bdcdd362 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.headblur.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.headblur +============================= + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: headblur \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.invert_colors.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.invert_colors.rst.txt new file mode 100644 index 000000000..dfee91802 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.invert_colors.rst.txt @@ -0,0 +1,6 @@ +moviepy\.video\.fx\.all\.invert\_colors +======================================= + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: invert_colors \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.loop.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.loop.rst.txt new file mode 100644 index 000000000..aa03c45d7 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.loop.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.loop +========================= + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: loop \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.lum_contrast.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.lum_contrast.rst.txt new file mode 100644 index 000000000..1c6d13627 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.lum_contrast.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.lum_contrast +================================= + +.. currentmodule:: moviepy.video.fx.all + +.. 
autofunction:: lum_contrast \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.make_loopable.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.make_loopable.rst.txt new file mode 100644 index 000000000..45ff3def3 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.make_loopable.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.make_loopable +================================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: make_loopable \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.margin.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.margin.rst.txt new file mode 100644 index 000000000..d3f331e35 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.margin.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.margin +=========================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: margin \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mask_and.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mask_and.rst.txt new file mode 100644 index 000000000..ccb643ded --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mask_and.rst.txt @@ -0,0 +1,6 @@ +moviepy\.video\.fx\.all\.mask\_and +================================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: mask_and \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mask_color.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mask_color.rst.txt new file mode 100644 index 000000000..c64b41801 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mask_color.rst.txt @@ -0,0 +1,6 @@ +moviepy\.video\.fx\.all\.mask\_color +==================================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: mask_color \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mask_or.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mask_or.rst.txt new file mode 100644 index 000000000..9351c96be --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mask_or.rst.txt @@ -0,0 +1,6 @@ +moviepy\.video\.fx\.all\.mask\_or +================================= + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: mask_or \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mirror_x.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mirror_x.rst.txt new file mode 100644 index 000000000..d63b281ed --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mirror_x.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.mirror_x +============================= + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: mirror_x \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mirror_y.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mirror_y.rst.txt new file mode 100644 index 000000000..5b5aeaddd --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.mirror_y.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.mirror_y +============================= + +.. currentmodule:: moviepy.video.fx.all + +.. 
autofunction:: mirror_y \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.painting.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.painting.rst.txt new file mode 100644 index 000000000..462755b30 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.painting.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.painting +============================= + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: painting \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.resize.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.resize.rst.txt new file mode 100644 index 000000000..0f3b9bc28 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.resize.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.resize +=========================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: resize \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.rotate.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.rotate.rst.txt new file mode 100644 index 000000000..d68ce476e --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.rotate.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.rotate +============================= + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: rotate \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.scroll.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.scroll.rst.txt new file mode 100644 index 000000000..176e9f8f1 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.scroll.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.scroll +=========================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: scroll \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.speedx.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.speedx.rst.txt new file mode 100644 index 000000000..6710d0fe2 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.speedx.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.speedx +=========================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: speedx \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.supersample.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.supersample.rst.txt new file mode 100644 index 000000000..0776fb575 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.supersample.rst.txt @@ -0,0 +1,6 @@ +moviepy\.video\.fx\.all\.supersample +==================================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: supersample \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.time_mirror.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.time_mirror.rst.txt new file mode 100644 index 000000000..cea4a124e --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.time_mirror.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.time_mirror +================================ + +.. currentmodule:: moviepy.video.fx.all + +.. 
autofunction:: time_mirror \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.time_symmetrize.rst.txt b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.time_symmetrize.rst.txt new file mode 100644 index 000000000..4f109d500 --- /dev/null +++ b/v1.0.3/_sources/ref/videofx/moviepy.video.fx.all.time_symmetrize.rst.txt @@ -0,0 +1,6 @@ +moviepy.video.fx.all.time_symmetrize +==================================== + +.. currentmodule:: moviepy.video.fx.all + +.. autofunction:: time_symmetrize \ No newline at end of file diff --git a/v1.0.3/_sources/ref/videotools.rst.txt b/v1.0.3/_sources/ref/videotools.rst.txt new file mode 100644 index 000000000..bb8eb46fb --- /dev/null +++ b/v1.0.3/_sources/ref/videotools.rst.txt @@ -0,0 +1,45 @@ +.. _ref_videotools: + +************ +video.tools +************ + +This module gathers advanced helper functions for editing videos, listed in alphabetical order. + + +Credits +-------- +.. automodule:: moviepy.video.tools.credits + :members: + :inherited-members: + :show-inheritance: + +Drawing +-------- +.. automodule:: moviepy.video.tools.drawing + :members: + :inherited-members: + :show-inheritance: + +Segmenting +---------- +.. automodule:: moviepy.video.tools.segmenting + :members: + :inherited-members: + :show-inheritance: + +Subtitles +---------- +.. automodule:: moviepy.video.tools.subtitles + :members: + :inherited-members: + :show-inheritance: + +Tracking +-------- +.. automodule:: moviepy.video.tools.tracking + :members: + :inherited-members: + :show-inheritance: + + diff --git a/v1.0.3/_static/ajax-loader.gif b/v1.0.3/_static/ajax-loader.gif new file mode 100644 index 000000000..61faf8cab Binary files /dev/null and b/v1.0.3/_static/ajax-loader.gif differ diff --git a/_static/basic.css b/v1.0.3/_static/basic.css similarity index 67% rename from _static/basic.css rename to v1.0.3/_static/basic.css index 61572969d..0807176ec 100644 --- a/_static/basic.css +++ b/v1.0.3/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details.
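The videotools.rst.txt source added above indexes the moviepy.video.tools helpers, and the preceding stubs generate the reference pages for the moviepy.video.fx.all functions. A minimal sketch of how that documented v1.0.3 API is typically exercised follows; it is not part of this diff, the input.mp4/subs.srt paths are hypothetical, and TextClip assumes ImageMagick is installed:

    # Hedged sketch (not from this diff): typical use of the APIs whose
    # reference sources are added above, using MoviePy 1.x idioms.
    from moviepy.editor import CompositeVideoClip, TextClip, VideoFileClip
    from moviepy.video.tools.subtitles import SubtitlesClip
    import moviepy.video.fx.all as vfx

    clip = VideoFileClip("input.mp4")  # hypothetical input path
    # fx functions (fadein, speedx, ...) are applied through Clip.fx:
    clip = clip.fx(vfx.fadein, 1).fx(vfx.speedx, 1.5)

    # video.tools.subtitles renders each cue through a TextClip generator
    # (TextClip requires ImageMagick in MoviePy 1.x).
    generator = lambda txt: TextClip(txt, fontsize=24, color="white")
    subs = SubtitlesClip("subs.srt", generator)  # hypothetical subtitle file

    final = CompositeVideoClip([clip, subs.set_position(("center", "bottom"))])
    final.write_videofile("output.mp4")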
* */ @@ -15,12 +15,6 @@ div.clearer { clear: both; } -div.section::after { - display: block; - content: ''; - clear: left; -} - /* -- relbar ---------------------------------------------------------------- */ div.related { @@ -55,7 +49,7 @@ div.sphinxsidebarwrapper { div.sphinxsidebar { float: left; - width: 270px; + width: 230px; margin-left: -100%; font-size: 90%; word-wrap: break-word; @@ -130,7 +124,7 @@ ul.search li a { font-weight: bold; } -ul.search li p.context { +ul.search li div.context { color: #888; margin: 2px 0 0 30px; text-align: left; @@ -222,7 +216,7 @@ table.modindextable td { /* -- general body styles --------------------------------------------------- */ div.body { - min-width: 360px; + min-width: 450px; max-width: 800px; } @@ -267,25 +261,19 @@ p.rubric { font-weight: bold; } -img.align-left, figure.align-left, .figure.align-left, object.align-left { +img.align-left, .figure.align-left, object.align-left { clear: left; float: left; margin-right: 1em; } -img.align-right, figure.align-right, .figure.align-right, object.align-right { +img.align-right, .figure.align-right, object.align-right { clear: right; float: right; margin-left: 1em; } -img.align-center, figure.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -img.align-default, figure.align-default, .figure.align-default { +img.align-center, .figure.align-center, object.align-center { display: block; margin-left: auto; margin-right: auto; @@ -299,45 +287,30 @@ img.align-default, figure.align-default, .figure.align-default { text-align: center; } -.align-default { - text-align: center; -} - .align-right { text-align: right; } /* -- sidebars -------------------------------------------------------------- */ -div.sidebar, -aside.sidebar { +div.sidebar { margin: 0 0 0.5em 1em; border: 1px solid #ddb; - padding: 7px; + padding: 7px 7px 0 7px; background-color: #ffe; width: 40%; float: right; - clear: right; - overflow-x: auto; } p.sidebar-title { font-weight: bold; } -nav.contents, -aside.topic, -div.admonition, div.topic, blockquote { - clear: left; -} - /* -- topics ---------------------------------------------------------------- */ -nav.contents, -aside.topic, div.topic { border: 1px solid #ccc; - padding: 7px; + padding: 7px 7px 0 7px; margin: 10px 0 10px 0; } @@ -359,6 +332,10 @@ div.admonition dt { font-weight: bold; } +div.admonition dl { + margin-bottom: 0; +} + p.admonition-title { margin: 0px 10px 5px 0px; font-weight: bold; @@ -369,34 +346,9 @@ div.body p.centered { margin-top: 25px; } -/* -- content of sidebars/topics/admonitions -------------------------------- */ - -div.sidebar > :last-child, -aside.sidebar > :last-child, -nav.contents > :last-child, -aside.topic > :last-child, -div.topic > :last-child, -div.admonition > :last-child { - margin-bottom: 0; -} - -div.sidebar::after, -aside.sidebar::after, -nav.contents::after, -aside.topic::after, -div.topic::after, -div.admonition::after, -blockquote::after { - display: block; - content: ''; - clear: both; -} - /* -- tables ---------------------------------------------------------------- */ table.docutils { - margin-top: 10px; - margin-bottom: 10px; border: 0; border-collapse: collapse; } @@ -406,11 +358,6 @@ table.align-center { margin-right: auto; } -table.align-default { - margin-left: auto; - margin-right: auto; -} - table caption span.caption-number { font-style: italic; } @@ -426,6 +373,10 @@ table.docutils td, table.docutils th { border-bottom: 1px solid #aaa; } +table.footnote 
td, table.footnote th { + border: 0 !important; +} + th { text-align: left; padding-right: 5px; @@ -440,34 +391,22 @@ table.citation td { border-bottom: none; } -th > :first-child, -td > :first-child { - margin-top: 0px; -} - -th > :last-child, -td > :last-child { - margin-bottom: 0px; -} - /* -- figures --------------------------------------------------------------- */ -div.figure, figure { +div.figure { margin: 0.5em; padding: 0.5em; } -div.figure p.caption, figcaption { +div.figure p.caption { padding: 0.3em; } -div.figure p.caption span.caption-number, -figcaption span.caption-number { +div.figure p.caption span.caption-number { font-style: italic; } -div.figure p.caption span.caption-text, -figcaption span.caption-text { +div.figure p.caption span.caption-text { } /* -- field list styles ----------------------------------------------------- */ @@ -494,71 +433,10 @@ table.field-list td, table.field-list th { /* -- hlist styles ---------------------------------------------------------- */ -table.hlist { - margin: 1em 0; -} - table.hlist td { vertical-align: top; } -/* -- object description styles --------------------------------------------- */ - -.sig { - font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; -} - -.sig-name, code.descname { - background-color: transparent; - font-weight: bold; -} - -.sig-name { - font-size: 1.1em; -} - -code.descname { - font-size: 1.2em; -} - -.sig-prename, code.descclassname { - background-color: transparent; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.sig-param.n { - font-style: italic; -} - -/* C++ specific styling */ - -.sig-inline.c-texpr, -.sig-inline.cpp-texpr { - font-family: unset; -} - -.sig.c .k, .sig.c .kt, -.sig.cpp .k, .sig.cpp .kt { - color: #0033B3; -} - -.sig.c .m, -.sig.cpp .m { - color: #1750EB; -} - -.sig.c .s, .sig.c .sc, -.sig.cpp .s, .sig.cpp .sc { - color: #067D17; -} - /* -- other body styles ----------------------------------------------------- */ @@ -582,81 +460,11 @@ ol.upperroman { list-style: upper-roman; } -:not(li) > ol > li:first-child > :first-child, -:not(li) > ul > li:first-child > :first-child { - margin-top: 0px; -} - -:not(li) > ol > li:last-child > :last-child, -:not(li) > ul > li:last-child > :last-child { - margin-bottom: 0px; -} - -ol.simple ol p, -ol.simple ul p, -ul.simple ol p, -ul.simple ul p { - margin-top: 0; -} - -ol.simple > li:not(:first-child) > p, -ul.simple > li:not(:first-child) > p { - margin-top: 0; -} - -ol.simple p, -ul.simple p { - margin-bottom: 0; -} - -aside.footnote > span, -div.citation > span { - float: left; -} -aside.footnote > span:last-of-type, -div.citation > span:last-of-type { - padding-right: 0.5em; -} -aside.footnote > p { - margin-left: 2em; -} -div.citation > p { - margin-left: 4em; -} -aside.footnote > p:last-of-type, -div.citation > p:last-of-type { - margin-bottom: 0em; -} -aside.footnote > p:last-of-type:after, -div.citation > p:last-of-type:after { - content: ""; - clear: both; -} - -dl.field-list { - display: grid; - grid-template-columns: fit-content(30%) auto; -} - -dl.field-list > dt { - font-weight: bold; - word-break: break-word; - padding-left: 0.5em; - padding-right: 5px; -} - -dl.field-list > dd { - padding-left: 0.5em; - margin-top: 0em; - margin-left: 0em; - margin-bottom: 0em; -} - dl { margin-bottom: 15px; } -dd > :first-child { +dd p { margin-top: 0px; } @@ -670,11 +478,6 @@ dd { margin-left: 30px; } -dl > dd:last-child, -dl > dd:last-child > :last-child { - margin-bottom: 0; 
-} - dt:target, span.highlighted { background-color: #fbe54e; } @@ -688,6 +491,14 @@ dl.glossary dt { font-size: 1.1em; } +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + .versionmodified { font-style: italic; } @@ -726,13 +537,6 @@ dl.glossary dt { font-style: oblique; } -.classifier:before { - font-style: normal; - margin: 0 0.5em; - content: ":"; - display: inline-block; -} - abbr, acronym { border-bottom: dotted 1px; cursor: help; @@ -745,69 +549,29 @@ pre { overflow-y: hidden; /* fixes display issues on Chrome browsers */ } -pre, div[class*="highlight-"] { - clear: both; -} - span.pre { -moz-hyphens: none; -ms-hyphens: none; -webkit-hyphens: none; hyphens: none; - white-space: nowrap; -} - -div[class*="highlight-"] { - margin: 1em 0; } td.linenos pre { + padding: 5px 0px; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { - display: block; -} - -table.highlighttable tbody { - display: block; -} - -table.highlighttable tr { - display: flex; + margin-left: 0.5em; } table.highlighttable td { - margin: 0; - padding: 0; -} - -table.highlighttable td.linenos { - padding-right: 0.5em; -} - -table.highlighttable td.code { - flex: 1; - overflow: hidden; -} - -.highlight .hll { - display: block; -} - -div.highlight pre, -table.highlighttable pre { - margin: 0; -} - -div.code-block-caption + div { - margin-top: 0; + padding: 0 0.5em 0 0.5em; } div.code-block-caption { - margin-top: 1em; padding: 2px 5px; font-size: small; } @@ -816,14 +580,8 @@ div.code-block-caption code { background-color: transparent; } -table.highlighttable td.linenos, -span.linenos, -div.highlight span.gp { /* gp: Generic.Prompt */ - user-select: none; - -webkit-user-select: text; /* Safari fallback only */ - -webkit-user-select: none; /* Chrome/Safari */ - -moz-user-select: none; /* Firefox */ - -ms-user-select: none; /* IE10+ */ +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; } div.code-block-caption span.caption-number { @@ -835,7 +593,21 @@ div.code-block-caption span.caption-text { } div.literal-block-wrapper { - margin: 1em 0; + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; } code.xref, a code { @@ -876,7 +648,8 @@ span.eqno { } span.eqno a.headerlink { - position: absolute; + position: relative; + left: 0px; z-index: 1; } diff --git a/v1.0.3/_static/comment-bright.png b/v1.0.3/_static/comment-bright.png new file mode 100644 index 000000000..15e27edb1 Binary files /dev/null and b/v1.0.3/_static/comment-bright.png differ diff --git a/v1.0.3/_static/comment-close.png b/v1.0.3/_static/comment-close.png new file mode 100644 index 000000000..4d91bcf57 Binary files /dev/null and b/v1.0.3/_static/comment-close.png differ diff --git a/v1.0.3/_static/comment.png b/v1.0.3/_static/comment.png new file mode 100644 index 000000000..dfbc0cbd5 Binary files /dev/null and b/v1.0.3/_static/comment.png differ diff --git a/v1.0.3/_static/css/badge_only.css b/v1.0.3/_static/css/badge_only.css new file mode 100644 index 000000000..e380325bc --- /dev/null +++ b/v1.0.3/_static/css/badge_only.css @@ -0,0 +1 @@ 
+.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/v1.0.3/_static/css/fonts/Roboto-Slab-Bold.woff b/v1.0.3/_static/css/fonts/Roboto-Slab-Bold.woff new file mode 100644 index 000000000..6cb600001 Binary files /dev/null and b/v1.0.3/_static/css/fonts/Roboto-Slab-Bold.woff differ diff 
--git a/v1.0.3/_static/css/fonts/Roboto-Slab-Bold.woff2 b/v1.0.3/_static/css/fonts/Roboto-Slab-Bold.woff2 new file mode 100644 index 000000000..7059e2314 Binary files /dev/null and b/v1.0.3/_static/css/fonts/Roboto-Slab-Bold.woff2 differ diff --git a/v1.0.3/_static/css/fonts/Roboto-Slab-Regular.woff b/v1.0.3/_static/css/fonts/Roboto-Slab-Regular.woff new file mode 100644 index 000000000..f815f63f9 Binary files /dev/null and b/v1.0.3/_static/css/fonts/Roboto-Slab-Regular.woff differ diff --git a/v1.0.3/_static/css/fonts/Roboto-Slab-Regular.woff2 b/v1.0.3/_static/css/fonts/Roboto-Slab-Regular.woff2 new file mode 100644 index 000000000..f2c76e5bd Binary files /dev/null and b/v1.0.3/_static/css/fonts/Roboto-Slab-Regular.woff2 differ diff --git a/v1.0.3/_static/css/fonts/fontawesome-webfont.eot b/v1.0.3/_static/css/fonts/fontawesome-webfont.eot new file mode 100644 index 000000000..e9f60ca95 Binary files /dev/null and b/v1.0.3/_static/css/fonts/fontawesome-webfont.eot differ diff --git a/v1.0.3/_static/css/fonts/fontawesome-webfont.svg b/v1.0.3/_static/css/fonts/fontawesome-webfont.svg new file mode 100644 index 000000000..855c845e5 --- /dev/null +++ b/v1.0.3/_static/css/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ +[2,671 added lines of SVG font markup, lost in text extraction; recoverable metadata: "Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016", "Copyright Dave Gandy 2016. All rights reserved."] diff --git a/v1.0.3/_static/css/fonts/fontawesome-webfont.ttf b/v1.0.3/_static/css/fonts/fontawesome-webfont.ttf new file mode 100644 index 000000000..35acda2fa Binary files /dev/null and b/v1.0.3/_static/css/fonts/fontawesome-webfont.ttf differ diff --git a/v1.0.3/_static/css/fonts/fontawesome-webfont.woff b/v1.0.3/_static/css/fonts/fontawesome-webfont.woff new file mode 100644 index 000000000..400014a4b Binary files /dev/null and b/v1.0.3/_static/css/fonts/fontawesome-webfont.woff differ diff --git a/v1.0.3/_static/css/fonts/fontawesome-webfont.woff2 b/v1.0.3/_static/css/fonts/fontawesome-webfont.woff2 new file mode 100644 index 000000000..4d13fc604 Binary files /dev/null and b/v1.0.3/_static/css/fonts/fontawesome-webfont.woff2 differ diff --git
a/v1.0.3/_static/css/fonts/lato-bold-italic.woff b/v1.0.3/_static/css/fonts/lato-bold-italic.woff new file mode 100644 index 000000000..88ad05b9f Binary files /dev/null and b/v1.0.3/_static/css/fonts/lato-bold-italic.woff differ diff --git a/v1.0.3/_static/css/fonts/lato-bold-italic.woff2 b/v1.0.3/_static/css/fonts/lato-bold-italic.woff2 new file mode 100644 index 000000000..c4e3d804b Binary files /dev/null and b/v1.0.3/_static/css/fonts/lato-bold-italic.woff2 differ diff --git a/v1.0.3/_static/css/fonts/lato-bold.woff b/v1.0.3/_static/css/fonts/lato-bold.woff new file mode 100644 index 000000000..c6dff51f0 Binary files /dev/null and b/v1.0.3/_static/css/fonts/lato-bold.woff differ diff --git a/v1.0.3/_static/css/fonts/lato-bold.woff2 b/v1.0.3/_static/css/fonts/lato-bold.woff2 new file mode 100644 index 000000000..bb195043c Binary files /dev/null and b/v1.0.3/_static/css/fonts/lato-bold.woff2 differ diff --git a/v1.0.3/_static/css/fonts/lato-normal-italic.woff b/v1.0.3/_static/css/fonts/lato-normal-italic.woff new file mode 100644 index 000000000..76114bc03 Binary files /dev/null and b/v1.0.3/_static/css/fonts/lato-normal-italic.woff differ diff --git a/v1.0.3/_static/css/fonts/lato-normal-italic.woff2 b/v1.0.3/_static/css/fonts/lato-normal-italic.woff2 new file mode 100644 index 000000000..3404f37e2 Binary files /dev/null and b/v1.0.3/_static/css/fonts/lato-normal-italic.woff2 differ diff --git a/v1.0.3/_static/css/fonts/lato-normal.woff b/v1.0.3/_static/css/fonts/lato-normal.woff new file mode 100644 index 000000000..ae1307ff5 Binary files /dev/null and b/v1.0.3/_static/css/fonts/lato-normal.woff differ diff --git a/v1.0.3/_static/css/fonts/lato-normal.woff2 b/v1.0.3/_static/css/fonts/lato-normal.woff2 new file mode 100644 index 000000000..3bf984332 Binary files /dev/null and b/v1.0.3/_static/css/fonts/lato-normal.woff2 differ diff --git a/v1.0.3/_static/css/theme.css b/v1.0.3/_static/css/theme.css new file mode 100644 index 000000000..8cd4f101a --- /dev/null +++ b/v1.0.3/_static/css/theme.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier 
new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li 
span.toctree-expand:before,.wy-nav-top a,.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li span.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li span.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content 
.fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li span.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li span.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li span.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, 
mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{co
ntent:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{co
ntent:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.
fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""
[deleted: minified Read the Docs theme stylesheet (FontAwesome icon glyph rules and wy-* layout/form/menu styles) — vendored CSS removed along with the rest of the built documentation; content elided]
span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid 
#e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand{color:#fcfcfc}.rst-versions 
.rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp{user-select:none;pointer-events:none}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content 
.attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso .last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink{visibility:hidden;font-size:14px}.rst-content .code-block-caption .headerlink:after,.rst-content .toctree-wrapper>p.caption .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 
.headerlink:after,.rst-content h6 .headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after{content:"\f0c1";font-family:FontAwesome}.rst-content .code-block-caption:hover .headerlink:after,.rst-content .toctree-wrapper>p.caption:hover .headerlink:after,.rst-content dl dt:hover .headerlink:after,.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .hlist{width:100%}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl dt span.classifier:before{content:" : "}html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.field-list>dt:after,html.writer-html5 .rst-content dl.footnote>dt:after{content:":"}html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.footnote>dt>span.brackets{margin-right:.5rem}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{font-style:italic}html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.footnote>dd p,html.writer-html5 .rst-content dl.option-list 
kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.field-list)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dl:not(.field-list)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code,html.writer-html4 .rst-content dl:not(.docutils) tt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid 
#7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/v1.0.3/_static/doctools.js b/v1.0.3/_static/doctools.js new file mode 100644 index 000000000..344db17dd --- /dev/null +++ b/v1.0.3/_static/doctools.js @@ -0,0 +1,315 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. 
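+ * For illustration (an assumed example, not in the upstream file): a query string + * such as '?highlight=clip+audio&page=2' parses to {highlight: ['clip audio'], page: ['2']}; + * repeated keys accumulate into one array, and '+' decodes to a space via jQuery.urldecode.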
+ */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var bbox = span.getBBox(); + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + var parentOfText = node.parentNode.parentNode; + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { + this.initOnKeyListeners(); + } + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n === 1 ? 
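+ // default English-style plural rule: index 0 selects the singular form, 1 the plural; addTranslations() below replaces it with the catalog's own plural_expr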
0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely be bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated === 'undefined') + return string; + return (typeof translated === 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated === 'undefined') + return (n == 1) ? singular : plural; + return translated[Documentation.PLURAL_EXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('<a class="headerlink">\u00B6</a>'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('<a class="headerlink">\u00B6</a>'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash && $.browser.mozilla) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('<p class="highlight-link"><a href="javascript:Documentation.hideSearchWords()">' + _('Hide Search Matches') + '</a></p>') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); // ids look like 'toggle-<n>'; keep just <n> + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) === 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this === '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && 
activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + break; // without this, a missing prev link fell through and navigated forward + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); diff --git a/_static/documentation_options.js b/v1.0.3/_static/documentation_options.js similarity index 60% rename from _static/documentation_options.js rename to v1.0.3/_static/documentation_options.js index 902bb3358..d28647eb8 100644 --- a/_static/documentation_options.js +++ b/v1.0.3/_static/documentation_options.js @@ -1,14 +1,10 @@ var DOCUMENTATION_OPTIONS = { URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), VERSION: '', - LANGUAGE: 'en', + LANGUAGE: 'None', COLLAPSE_INDEX: false, - BUILDER: 'html', FILE_SUFFIX: '.html', - LINK_SUFFIX: '.html', HAS_SOURCE: true, SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: true, - SHOW_SEARCH_SUMMARY: true, - ENABLE_SEARCH_SHORTCUTS: true, + NAVIGATION_WITH_KEYS: false, }; \ No newline at end of file diff --git a/v1.0.3/_static/down-pressed.png b/v1.0.3/_static/down-pressed.png new file mode 100644 index 000000000..5756c8cad Binary files /dev/null and b/v1.0.3/_static/down-pressed.png differ diff --git a/v1.0.3/_static/down.png b/v1.0.3/_static/down.png new file mode 100644 index 000000000..1b3bdad2c Binary files /dev/null and b/v1.0.3/_static/down.png differ diff --git a/_static/file.png b/v1.0.3/_static/file.png similarity index 100% rename from _static/file.png rename to v1.0.3/_static/file.png diff --git a/v1.0.3/_static/fonts/Inconsolata-Bold.ttf b/v1.0.3/_static/fonts/Inconsolata-Bold.ttf new file mode 100644 index 000000000..809c1f582 Binary files /dev/null and b/v1.0.3/_static/fonts/Inconsolata-Bold.ttf differ diff --git a/v1.0.3/_static/fonts/Inconsolata-Regular.ttf b/v1.0.3/_static/fonts/Inconsolata-Regular.ttf new file mode 100644 index 000000000..fc981ce7a Binary files /dev/null and b/v1.0.3/_static/fonts/Inconsolata-Regular.ttf differ diff --git a/v1.0.3/_static/fonts/Inconsolata.ttf b/v1.0.3/_static/fonts/Inconsolata.ttf new file mode 100644 index 000000000..4b8a36d24 Binary files /dev/null and b/v1.0.3/_static/fonts/Inconsolata.ttf differ diff --git a/v1.0.3/_static/fonts/Lato-Bold.ttf b/v1.0.3/_static/fonts/Lato-Bold.ttf new file mode 100644 index 000000000..1d23c7066 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato-Bold.ttf differ diff --git a/v1.0.3/_static/fonts/Lato-Regular.ttf b/v1.0.3/_static/fonts/Lato-Regular.ttf new file mode 100644 index 000000000..0f3d0f837 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato-Regular.ttf differ diff --git a/v1.0.3/_static/fonts/Lato/lato-bold.eot b/v1.0.3/_static/fonts/Lato/lato-bold.eot new file mode 100644 index 000000000..3361183a4 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-bold.eot differ diff --git a/v1.0.3/_static/fonts/Lato/lato-bold.ttf b/v1.0.3/_static/fonts/Lato/lato-bold.ttf new file mode 100644 index 000000000..29f691d5e Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-bold.ttf differ diff --git a/v1.0.3/_static/fonts/Lato/lato-bold.woff b/v1.0.3/_static/fonts/Lato/lato-bold.woff new file mode 100644 index 000000000..c6dff51f0 Binary files /dev/null and 
b/v1.0.3/_static/fonts/Lato/lato-bold.woff differ diff --git a/v1.0.3/_static/fonts/Lato/lato-bold.woff2 b/v1.0.3/_static/fonts/Lato/lato-bold.woff2 new file mode 100644 index 000000000..bb195043c Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-bold.woff2 differ diff --git a/v1.0.3/_static/fonts/Lato/lato-bolditalic.eot b/v1.0.3/_static/fonts/Lato/lato-bolditalic.eot new file mode 100644 index 000000000..3d4154936 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-bolditalic.eot differ diff --git a/v1.0.3/_static/fonts/Lato/lato-bolditalic.ttf b/v1.0.3/_static/fonts/Lato/lato-bolditalic.ttf new file mode 100644 index 000000000..f402040b3 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-bolditalic.ttf differ diff --git a/v1.0.3/_static/fonts/Lato/lato-bolditalic.woff b/v1.0.3/_static/fonts/Lato/lato-bolditalic.woff new file mode 100644 index 000000000..88ad05b9f Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-bolditalic.woff differ diff --git a/v1.0.3/_static/fonts/Lato/lato-bolditalic.woff2 b/v1.0.3/_static/fonts/Lato/lato-bolditalic.woff2 new file mode 100644 index 000000000..c4e3d804b Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-bolditalic.woff2 differ diff --git a/v1.0.3/_static/fonts/Lato/lato-italic.eot b/v1.0.3/_static/fonts/Lato/lato-italic.eot new file mode 100644 index 000000000..3f826421a Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-italic.eot differ diff --git a/v1.0.3/_static/fonts/Lato/lato-italic.ttf b/v1.0.3/_static/fonts/Lato/lato-italic.ttf new file mode 100644 index 000000000..b4bfc9b24 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-italic.ttf differ diff --git a/v1.0.3/_static/fonts/Lato/lato-italic.woff b/v1.0.3/_static/fonts/Lato/lato-italic.woff new file mode 100644 index 000000000..76114bc03 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-italic.woff differ diff --git a/v1.0.3/_static/fonts/Lato/lato-italic.woff2 b/v1.0.3/_static/fonts/Lato/lato-italic.woff2 new file mode 100644 index 000000000..3404f37e2 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-italic.woff2 differ diff --git a/v1.0.3/_static/fonts/Lato/lato-regular.eot b/v1.0.3/_static/fonts/Lato/lato-regular.eot new file mode 100644 index 000000000..11e3f2a5f Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-regular.eot differ diff --git a/v1.0.3/_static/fonts/Lato/lato-regular.ttf b/v1.0.3/_static/fonts/Lato/lato-regular.ttf new file mode 100644 index 000000000..74decd9eb Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-regular.ttf differ diff --git a/v1.0.3/_static/fonts/Lato/lato-regular.woff b/v1.0.3/_static/fonts/Lato/lato-regular.woff new file mode 100644 index 000000000..ae1307ff5 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-regular.woff differ diff --git a/v1.0.3/_static/fonts/Lato/lato-regular.woff2 b/v1.0.3/_static/fonts/Lato/lato-regular.woff2 new file mode 100644 index 000000000..3bf984332 Binary files /dev/null and b/v1.0.3/_static/fonts/Lato/lato-regular.woff2 differ diff --git a/v1.0.3/_static/fonts/RobotoSlab-Bold.ttf b/v1.0.3/_static/fonts/RobotoSlab-Bold.ttf new file mode 100644 index 000000000..df5d1df27 Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab-Bold.ttf differ diff --git a/v1.0.3/_static/fonts/RobotoSlab-Regular.ttf b/v1.0.3/_static/fonts/RobotoSlab-Regular.ttf new file mode 100644 index 000000000..eb52a7907 Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab-Regular.ttf differ diff --git 
a/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot new file mode 100644 index 000000000..79dc8efed Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot differ diff --git a/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf new file mode 100644 index 000000000..df5d1df27 Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf differ diff --git a/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff new file mode 100644 index 000000000..6cb600001 Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff differ diff --git a/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 new file mode 100644 index 000000000..7059e2314 Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 differ diff --git a/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot new file mode 100644 index 000000000..2f7ca78a1 Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot differ diff --git a/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf new file mode 100644 index 000000000..eb52a7907 Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf differ diff --git a/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff new file mode 100644 index 000000000..f815f63f9 Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff differ diff --git a/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 new file mode 100644 index 000000000..f2c76e5bd Binary files /dev/null and b/v1.0.3/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 differ diff --git a/v1.0.3/_static/fonts/fontawesome-webfont.eot b/v1.0.3/_static/fonts/fontawesome-webfont.eot new file mode 100644 index 000000000..e9f60ca95 Binary files /dev/null and b/v1.0.3/_static/fonts/fontawesome-webfont.eot differ diff --git a/v1.0.3/_static/fonts/fontawesome-webfont.svg b/v1.0.3/_static/fonts/fontawesome-webfont.svg new file mode 100644 index 000000000..855c845e5 --- /dev/null +++ b/v1.0.3/_static/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. 
+ [the remaining glyph outline definitions of the 2,671-line FontAwesome SVG font are omitted; only the metadata above is preserved] diff --git a/v1.0.3/_static/fonts/fontawesome-webfont.ttf b/v1.0.3/_static/fonts/fontawesome-webfont.ttf new file mode 100644 index 000000000..35acda2fa Binary files /dev/null and b/v1.0.3/_static/fonts/fontawesome-webfont.ttf differ diff --git a/v1.0.3/_static/fonts/fontawesome-webfont.woff b/v1.0.3/_static/fonts/fontawesome-webfont.woff new file mode 100644 index 000000000..400014a4b Binary files /dev/null and b/v1.0.3/_static/fonts/fontawesome-webfont.woff differ diff --git a/v1.0.3/_static/fonts/fontawesome-webfont.woff2 b/v1.0.3/_static/fonts/fontawesome-webfont.woff2 new file mode 100644 index 000000000..4d13fc604 Binary files /dev/null and b/v1.0.3/_static/fonts/fontawesome-webfont.woff2 differ diff --git a/v1.0.3/_static/jquery-3.2.1.js b/v1.0.3/_static/jquery-3.2.1.js new file mode 100644 index 000000000..d2d8ca479 --- /dev/null +++ b/v1.0.3/_static/jquery-3.2.1.js @@ -0,0 +1,10253 @@ +/*! + * jQuery JavaScript Library v3.2.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2017-03-20T18:59Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? 
window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + + + + function DOMEval( code, doc ) { + doc = doc || document; + + var script = doc.createElement( "script" ); + + script.text = code; + doc.head.appendChild( script ).parentNode.removeChild( script ); + } +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.2.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? 
[ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && Array.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 + var name; + + 
for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. 
+ if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-08-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors 
needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ 
+function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. 
+ if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. + } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( el ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll("[msallowcapture^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push("~="); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push(".#.+[+~]"); + } + }); + + assert(function( el ) { + el.innerHTML = "" + + ""; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement("input"); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + 
// Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? 
-1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
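+// Rough usage sketch (illustrative only, not part of the original file):
+// compiled matchers are memoized in compilerCache, so recompiling the same
+// selector is cheap, and a precompiled function can be handed straight back
+// to select():
+//
+//   var matcher = Sizzle.compile( "div > .item" );
+//   matcher === Sizzle.compile( "div > .item" );  // true, cache hit
+//   Sizzle.select( matcher, document, [] );       // reuses matcher.selector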
0 : tokens.length;
+	while ( i-- ) {
+		token = tokens[i];
+
+		// Abort if we hit a combinator
+		if ( Expr.relative[ (type = token.type) ] ) {
+			break;
+		}
+		if ( (find = Expr.find[ type ]) ) {
+			// Search, expanding context for leading sibling combinators
+			if ( (seed = find(
+				token.matches[0].replace( runescape, funescape ),
+				rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
+			)) ) {
+
+				// If seed is empty or no tokens remain, we can return early
+				tokens.splice( i, 1 );
+				selector = seed.length && toSelector( tokens );
+				if ( !selector ) {
+					push.apply( results, seed );
+					return results;
+				}
+
+				break;
+			}
+		}
+	}
+	}
+
+	// Compile and execute a filtering function if one is not provided
+	// Provide `match` to avoid retokenization if we modified the selector above
+	( compiled || compile( selector, match ) )(
+		seed,
+		context,
+		!documentIsHTML,
+		results,
+		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+	);
+	return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert(function( el ) {
+	// Should return 1, but returns 4 (following)
+	return el.compareDocumentPosition( document.createElement("fieldset") ) & 1;
+});
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert(function( el ) {
+	el.innerHTML = "<a href='#'></a>";
+	return el.firstChild.getAttribute("href") === "#" ;
+}) ) {
+	addHandle( "type|href|height|width", function( elem, name, isXML ) {
+		if ( !isXML ) {
+			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+		}
+	});
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert(function( el ) {
+	el.innerHTML = "<input/>";
+	el.firstChild.setAttribute( "value", "" );
+	return el.firstChild.getAttribute( "value" ) === "";
+}) ) {
+	addHandle( "value", function( elem, name, isXML ) {
+		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+			return elem.defaultValue;
+		}
+	});
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert(function( el ) {
+	return el.getAttribute("disabled") == null;
+}) ) {
+	addHandle( booleans, function( elem, name, isXML ) {
+		var val;
+		if ( !isXML ) {
+			return elem[ name ] === true ? name.toLowerCase() :
+				(val = elem.getAttributeNode( name )) && val.specified ?
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Simple selector that can be filtered directly, removing non-Elements + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + // Complex selector, compare the two sets, removing non-Elements + qualifier = jQuery.filter( qualifier, elements ); + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; + } ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? 
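+// Rough usage sketch (illustrative only, not part of the original file):
+// winnow() above backs .filter(), .not() and .is(), and accepts a function,
+// a single node, an array-like, or a selector string:
+//
+//   $( "li" ).filter( ".active" )                        // keep matches
+//   $( "li" ).not( document.activeElement )              // drop one node
+//   $( "li" ).filter( function( i ) { return i % 2 === 0; } )
+//   $( "li" ).is( ":last-child" )                        // any li matches?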
jQuery.uniqueSort( ret ) : ret;
+	},
+	filter: function( selector ) {
+		return this.pushStack( winnow( this, selector || [], false ) );
+	},
+	not: function( selector ) {
+		return this.pushStack( winnow( this, selector || [], true ) );
+	},
+	is: function( selector ) {
+		return !!winnow(
+			this,
+
+			// If this is a positional/relative selector, check membership in the returned set
+			// so $("p:first").is("p:last") won't return true for a doc with two "p".
+			typeof selector === "string" && rneedsContext.test( selector ) ?
+				jQuery( selector ) :
+				selector || [],
+			false
+		).length;
+	}
+} );
+
+
+// Initialize a jQuery object
+
+
+// A central reference to the root jQuery(document)
+var rootjQuery,
+
+	// A simple way to check for HTML strings
+	// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+	// Strict HTML recognition (#11290: must start with <)
+	// Shortcut simple #id case for speed
+	rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
+
+	init = jQuery.fn.init = function( selector, context, root ) {
+		var match, elem;
+
+		// HANDLE: $(""), $(null), $(undefined), $(false)
+		if ( !selector ) {
+			return this;
+		}
+
+		// Method init() accepts an alternate rootjQuery
+		// so migrate can support jQuery.sub (gh-2101)
+		root = root || rootjQuery;
+
+		// Handle HTML strings
+		if ( typeof selector === "string" ) {
+			if ( selector[ 0 ] === "<" &&
+				selector[ selector.length - 1 ] === ">" &&
+				selector.length >= 3 ) {
+
+				// Assume that strings that start and end with <> are HTML and skip the regex check
+				match = [ null, selector, null ];
+
+			} else {
+				match = rquickExpr.exec( selector );
+			}
+
+			// Match html or make sure no context is specified for #id
+			if ( match && ( match[ 1 ] || !context ) ) {
+
+				// HANDLE: $(html) -> $(array)
+				if ( match[ 1 ] ) {
+					context = context instanceof jQuery ? context[ 0 ] : context;
+
+					// Option to run scripts is true for back-compat
+					// Intentionally let the error be thrown if parseHTML is not present
+					jQuery.merge( this, jQuery.parseHTML(
+						match[ 1 ],
+						context && context.nodeType ? context.ownerDocument || context : document,
+						true
+					) );
+
+					// HANDLE: $(html, props)
+					if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
+						for ( match in context ) {
+
+							// Properties of context are called as methods if possible
+							if ( jQuery.isFunction( this[ match ] ) ) {
+								this[ match ]( context[ match ] );
+
+							// ...and otherwise set as attributes
+							} else {
+								this.attr( match, context[ match ] );
+							}
+						}
+					}
+
+					return this;
+
+				// HANDLE: $(#id)
+				} else {
+					elem = document.getElementById( match[ 2 ] );
+
+					if ( elem ) {
+
+						// Inject the element directly into the jQuery object
+						this[ 0 ] = elem;
+						this.length = 1;
+					}
+					return this;
+				}
+
+			// HANDLE: $(expr, $(...))
+			} else if ( !context || context.jquery ) {
+				return ( context || root ).find( selector );
+
+			// HANDLE: $(expr, context)
+			// (which is just equivalent to: $(context).find(expr))
+			} else {
+				return this.constructor( context ).find( selector );
+			}
+
+		// HANDLE: $(DOMElement)
+		} else if ( selector.nodeType ) {
+			this[ 0 ] = selector;
+			this.length = 1;
+			return this;
+
+		// HANDLE: $(function)
+		// Shortcut for document ready
+		} else if ( jQuery.isFunction( selector ) ) {
+			return root.ready !== undefined ?
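+// Rough summary (illustrative only; ctx and domNode are assumed names) of
+// the argument forms the init() constructor being defined here dispatches on:
+//
+//   $( "" ) / $( null ) / $( undefined ) / $( false )  // empty set
+//   $( "<p>hi</p>" )                  // parse HTML into detached nodes
+//   $( "<p/>", { text: "hi" } )       // single tag plus properties object
+//   $( "#main" )                      // fast document.getElementById path
+//   $( ".item", ctx )                 // same as $( ctx ).find( ".item" )
+//   $( domNode )                      // wrap an existing element
+//   $( function() { /* ... */ } )     // shortcut for document ready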
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
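+// Rough usage sketch (illustrative only; "el" is an assumed element). The
+// map being defined here is turned into jQuery.fn methods by the
+// jQuery.each() call that follows it: "Until" variants accept a stop
+// selector, results are deduped for non-guaranteed-unique methods, and
+// parents*/prev* results of multi-element sets are reversed:
+//
+//   $( el ).parents()                 // ancestors, closest first
+//   $( el ).parentsUntil( "#root" )   // ancestors up to, excluding, #root
+//   $( el ).nextAll( ".note" )        // following siblings, filtered
+//   $( el ).siblings()                // all siblings except el itself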
parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( nodeName( elem, "iframe" ) ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
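+// Rough usage sketch (illustrative only; fnA and fnB are assumed callbacks)
+// of the flag combinations documented above:
+//
+//   var cb = jQuery.Callbacks( "once memory" );
+//   cb.add( fnA );
+//   cb.fire( "hi" );     // fnA( "hi" ) runs
+//   cb.add( fnB );       // "memory": fnB( "hi" ) runs immediately
+//   cb.fire( "again" );  // "once": ignored, the list has already fired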
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( jQuery.isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
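+// Rough usage sketch (illustrative only; d1 and d2 are assumed Deferreds):
+// handler return values propagate down the chain, and a returned thenable
+// is adopted instead of being passed along as a plain value:
+//
+//   jQuery.Deferred().resolve( 2 )
+//       .then( function( v ) { return v * 3; } )
+//       .done( function( v ) { /* v === 6 */ } );
+//
+//   jQuery.when( d1, d2 ).done( function( v1, v2 ) { /* both resolved */ } );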
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( jQuery.isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
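+// Rough routing sketch (illustrative only; el is an assumed element) for
+// the access() helper being defined here, which backs most value methods
+// such as .attr(), .prop() and .css():
+//
+//   $( el ).css( "width" )                 // read path, returns a value
+//   $( el ).css( "width", "10px" )         // set path, chainable
+//   $( el ).css( { width: 10, top: 0 } )   // object form, recurses per key
+//   $( el ).css( "width", function( i, old ) { /* computed set */ } )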
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ jQuery.camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ jQuery.camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( jQuery.camelCase ); + } else { + key = jQuery.camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? 
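+// Rough usage sketch (illustrative only; node is an assumed element) of
+// this internal Data store; values live under a per-instance expando key
+// on the owner object:
+//
+//   var store = new Data();
+//   store.set( node, "fooBar", 42 );
+//   store.get( node, "fooBar" );     // 42
+//   store.access( node, "fooBar" );  // get or set, depending on arguments
+//   store.remove( node, "fooBar" );  // expando dropped once cache is empty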
+ [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
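+// Rough usage sketch (illustrative only; div is an assumed element):
+// dataPriv holds internal state (queues, events, display), dataUser holds
+// .data() values, and dataAttr() lazily imports data-* attributes with
+// getData() coercion. Given <div data-count="7" data-cfg='{"a":1}'>:
+//
+//   $( div ).data( "count" )     // 7, coerced to a number
+//   $( div ).data( "cfg" )       // { a: 1 }, via JSON.parse
+//   $( div ).data( "count", 8 )  // stored in dataUser; attribute unchanged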
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
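+// Rough usage sketch (illustrative only; el is an assumed element) of the
+// queue API defined above; "fx" is the default queue:
+//
+//   $( el ).queue( function( next ) {  // append a custom fx step
+//       /* ... do work ... */
+//       next();                        // hand off to the next queued item
+//   } );
+//   $( el ).queue( "fx" );             // read: the pending queue array
+//   $( el ).dequeue();                 // start/resume the fx queue
+//   $( el ).clearQueue();              // drop all not-in-progress items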
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + jQuery.contains( elem.ownerDocument, elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, + scale = 1, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + do { + + // If previous iteration zeroed out, double until we get *something*. 
+			// Use string for doubling so we don't accidentally see scale as unchanged below
+			scale = scale || ".5";
+
+			// Adjust and apply
+			initialInUnit = initialInUnit / scale;
+			jQuery.style( elem, prop, initialInUnit + unit );
+
+		// Update scale, tolerating zero or NaN from tween.cur()
+		// Break the loop if scale is unchanged or perfect, or if we've just had enough.
+		} while (
+			scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations
+		);
+	}
+
+	if ( valueParts ) {
+		initialInUnit = +initialInUnit || +initial || 0;
+
+		// Apply relative offset (+=/-=) if specified
+		adjusted = valueParts[ 1 ] ?
+			initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] :
+			+valueParts[ 2 ];
+		if ( tween ) {
+			tween.unit = unit;
+			tween.start = initialInUnit;
+			tween.end = adjusted;
+		}
+	}
+	return adjusted;
+}
+
+
+var defaultDisplayMap = {};
+
+function getDefaultDisplay( elem ) {
+	var temp,
+		doc = elem.ownerDocument,
+		nodeName = elem.nodeName,
+		display = defaultDisplayMap[ nodeName ];
+
+	if ( display ) {
+		return display;
+	}
+
+	temp = doc.body.appendChild( doc.createElement( nodeName ) );
+	display = jQuery.css( temp, "display" );
+
+	temp.parentNode.removeChild( temp );
+
+	if ( display === "none" ) {
+		display = "block";
+	}
+	defaultDisplayMap[ nodeName ] = display;
+
+	return display;
+}
+
+function showHide( elements, show ) {
+	var display, elem,
+		values = [],
+		index = 0,
+		length = elements.length;
+
+	// Determine new display value for elements that need to change
+	for ( ; index < length; index++ ) {
+		elem = elements[ index ];
+		if ( !elem.style ) {
+			continue;
+		}
+
+		display = elem.style.display;
+		if ( show ) {
+
+			// Since we force visibility upon cascade-hidden elements, an immediate (and slow)
+			// check is required in this first loop unless we have a nonempty display value (either
+			// inline or about-to-be-restored)
+			if ( display === "none" ) {
+				values[ index ] = dataPriv.get( elem, "display" ) || null;
+				if ( !values[ index ] ) {
+					elem.style.display = "";
+				}
+			}
+			if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
+				values[ index ] = getDefaultDisplay( elem );
+			}
+		} else {
+			if ( display !== "none" ) {
+				values[ index ] = "none";
+
+				// Remember what we're overwriting
+				dataPriv.set( elem, "display", display );
+			}
+		}
+	}
+
+	// Set the display of the elements in a second loop to avoid constant reflow
+	for ( index = 0; index < length; index++ ) {
+		if ( values[ index ] != null ) {
+			elements[ index ].style.display = values[ index ];
+		}
+	}
+
+	return elements;
+}
+
+jQuery.fn.extend( {
+	show: function() {
+		return showHide( this, true );
+	},
+	hide: function() {
+		return showHide( this );
+	},
+	toggle: function( state ) {
+		if ( typeof state === "boolean" ) {
+			return state ? this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i );
+
+var rscriptType = ( /^$|\/(?:java|ecma)script/i );
+
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// Support: IE <=9 only
+	option: [ 1, "<select multiple='multiple'>", "</select>" ],
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +// Support: IE <=9 only +wrapMap.optgroup = wrapMap.option; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, contains, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( jQuery.type( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps 
(WWA) + // `name` and `type` must use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = "<textarea>x</textarea>"; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; +} )(); +var documentElement = document.documentElement; + + + +var + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 only +// See #13393 for more info +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas.
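+ *
+ * Rough orientation (summary note, not from the upstream header):
+ * jQuery.event.add / .remove attach and detach handlers per element,
+ * jQuery.event.dispatch runs them when a native event fires, and
+ * jQuery.event.handlers picks which delegated handlers match the target.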
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
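+ // Illustration of the resulting handleObj (hypothetical binding):
+ //   jQuery( document ).on( "click.menu", ".item", fn )
+ // stores origType "click", namespace "menu", selector ".item" and the
+ // handler, so a later .off( "click.menu" ) can find it again.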
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or 2) have namespace(s) + // a subset or equal to those in the bound event (both can have no namespace). 
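+ // E.g. a handler bound as "click.menu" runs for .trigger( "click" ) and
+ // .trigger( "click.menu" ), but is skipped for .trigger( "click.other" ).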
+ if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: jQuery.isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? 
+ originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + this.focus(); + return false; + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
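+ // This check is what makes delegated mouseenter possible, e.g.
+ //   jQuery( "#list" ).on( "mouseenter", "li", fn )
+ // ("#list" being a hypothetical container): the non-bubbling mouseenter
+ // is bound as bubbling mouseover and filtered here via relatedTarget.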
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + /* eslint-disable max-len */ + + // See https://github.com/eslint/eslint/issues/3229 + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, + + /* eslint-enable */ + + // Support: IE <=10 - 11, Edge 12 - 13 + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /<script|<style|<link/i, + + // checked="checked" or checked + rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, + rscriptTypeMasked = /^true\/(.*)/, + rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { + + return jQuery( ">tbody", elem )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + var match = rscriptTypeMasked.exec( elem.type ); + + if ( match ) { + elem.type = match[ 1 ]; + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.access( src ); + pdataCur = dataPriv.set( dest, pdataOld ); + events = pdataOld.events; + + if ( events ) { + delete pdataCur.handle; + pdataCur.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button.
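+ // E.g. cloning a checked checkbox with jQuery( elem ).clone() on affected
+ // IE versions would otherwise come back unchecked; jQuery.clone() only
+ // routes elements through here when support.noCloneChecked is false.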
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( isFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html.replace( rxhtmlTag, "<$1></$2>" ); + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = jQuery.contains( elem.ownerDocument, elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
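+ // access() gives .text() its getter/setter duality, e.g.
+ //   jQuery( "p" ).text()        // reads concatenated text of all matches
+ //   jQuery( "p" ).text( "hi" )  // writes textContent on each match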
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
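+ // Only the last target keeps the original elements; earlier targets get
+ // deep clones, so e.g. jQuery( "<li>" ).appendTo( "ul" ) can populate
+ // several lists from one source set.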
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rmargin = ( /^margin/ ); + +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + div.style.cssText = + "box-sizing:border-box;" + + "position:relative;display:block;" + + "margin:auto;border:1px;padding:1px;" + + "top:1%;width:50%"; + div.innerHTML = ""; + documentElement.appendChild( container ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = divStyle.marginLeft === "2px"; + boxSizingReliableVal = divStyle.width === "4px"; + + // Support: Android 4.0 - 4.3 only + // Some styles come back with percentage values, even though they shouldn't + div.style.marginRight = "50%"; + pixelMarginRightVal = divStyle.marginRight === "4px"; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + + "padding:0;margin-top:1px;position:absolute"; + container.appendChild( div ); + + jQuery.extend( support, { + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelMarginRight: function() { + computeStyleTests(); + return pixelMarginRightVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }, + + cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style; + +// Return a css property mapped to a potentially vendor prefixed property +function vendorPropName( name ) { + + // Shortcut for names that are not vendor prefixed + if ( name in emptyStyle ) { + return name; + } + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a property mapped along what jQuery.cssProps suggests or to +// a vendor prefixed property. +function finalPropName( name ) { + var ret = jQuery.cssProps[ name ]; + if ( !ret ) { + ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; + } + return ret; +} + +function setPositiveNumber( elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { + var i, + val = 0; + + // If we already have the right measurement, avoid augmentation + if ( extra === ( isBorderBox ? "border" : "content" ) ) { + i = 4; + + // Otherwise initialize for horizontal or vertical properties + } else { + i = name === "width" ? 
1 : 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin, so add it if we want it + if ( extra === "margin" ) { + val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); + } + + if ( isBorderBox ) { + + // border-box includes padding, so remove it if we want content + if ( extra === "content" ) { + val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // At this point, extra isn't border nor margin, so remove border + if ( extra !== "margin" ) { + val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } else { + + // At this point, extra isn't content, so add padding + val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // At this point, extra isn't content nor padding, so add border + if ( extra !== "padding" ) { + val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + return val; +} + +function getWidthOrHeight( elem, name, extra ) { + + // Start with computed style + var valueIsBorderBox, + styles = getStyles( elem ), + val = curCSS( elem, name, styles ), + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Computed unit is not pixels. Stop here and return. + if ( rnumnonpx.test( val ) ) { + return val; + } + + // Check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = isBorderBox && + ( support.boxSizingReliable() || val === elem.style[ name ] ); + + // Fall back to offsetWidth/Height when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + if ( val === "auto" ) { + val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; + } + + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + + // Use the active box-sizing model to add/subtract irrelevant styles + return ( val + + augmentWidthOrHeight( + elem, + name, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + "float": "cssFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
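+ // E.g. jQuery( elem ).css( "--brand-color", "#fff" ) (a hypothetical
+ // custom property) is written via style.setProperty() further down and
+ // read back under its raw, un-camelCased name.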
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + if ( type === "number" ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
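+ // i.e. the element is briefly given the cssShow styles via swap() so a
+ // real size can be read, which is why e.g. jQuery( el ).width() still
+ // reports a usable value for a display:none element.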
+ swap( elem, cssShow, function() { + return getWidthOrHeight( elem, name, extra ); + } ) : + getWidthOrHeight( elem, name, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = extra && getStyles( elem ), + subtract = extra && augmentWidthOrHeight( + elem, + name, + extra, + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + styles + ); + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ name ] = value; + value = jQuery.css( elem, name ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( !rmargin.test( prefix ) ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && + ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || + jQuery.cssHooks[ tween.prop ] ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = jQuery.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
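+ // E.g. genFx( "show" ) yields { height: "show", marginTop: "show",
+ // marginBottom: "show", paddingTop: "show", paddingBottom: "show" }
+ // (the slideDown() shape); passing includeWidth adds width, opacity
+ // and the Left/Right box properties for show()/hide()/toggle().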
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 13 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( 
restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = jQuery.camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
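propFilter above is what makes the array form of animate properties work: a `[ value, easing ]` pair is split into the target value and a per-property entry in `specialEasing`. A sketch (selector and values illustrative, assuming a positioned element):

jQuery( "#box" ).animate( {
	left: [ "200px", "linear" ],   // propFilter: props.left = "200px", specialEasing.left = "linear"
	opacity: [ 0.25, "swing" ]
}, 600 );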
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( jQuery.isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + jQuery.proxy( result.stop, result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( jQuery.isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( jQuery.isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
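jQuery.speed, whose body continues below, normalizes the loose (duration, easing, callback) argument forms into a single options object; these calls end up equivalent (selector and handler illustrative):

function done() {
	console.log( "slide finished" );
}

// Each resolves to a 200 ms "fx"-queued slide with done as the complete callback:
jQuery( "#panel" ).slideUp( 200, done );
jQuery( "#panel" ).slideUp( "fast", done );
jQuery( "#panel" ).slideUp( { duration: "fast", complete: done } );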
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
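Note the asymmetry implemented here: `stop` above rejects the animation's deferred unless told to jump to the end, while `finish` (continuing below) fast-forwards the current and all queued animations. Contrast, with an illustrative selector:

jQuery( "#box" ).stop();              // freeze the current animation mid-flight
jQuery( "#box" ).stop( true, true );  // clear the queue, jump the current one to its end
jQuery( "#box" ).finish();            // jump current AND queued animations to their ends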
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = jQuery.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
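`.delay()`, continuing below, resolves named durations through the `jQuery.fx.speeds` table just defined and parks a cancellable timer on the queue. A sketch (selector illustrative):

// "slow" is looked up in jQuery.fx.speeds (600 ms); the fadeOut only
// starts once the queued timer fires.
jQuery( "#notice" ).fadeIn( "fast" ).delay( "slow" ).fadeOut( 400 );

// The delay is cancellable because .delay() installs hooks.stop:
jQuery( "#notice" ).stop( true );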
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
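The getter above normalizes missing attributes to `undefined` and routes boolean attributes through `boolHook` (defined below); properties are a separate channel. An illustrative contrast, assuming checkbox markup `<input id="cb" type="checkbox" checked>` that the user has since unchecked:

jQuery( "#cb" ).attr( "checked" );   // "checked": the attribute reflects the static markup
jQuery( "#cb" ).prop( "checked" );   // false: the property tracks live state

// Setting a boolean attribute to false removes it (see boolHook.set below):
jQuery( "#cb" ).attr( "checked", false );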
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value; + + if ( typeof stateVal === "boolean" && type === "string" ) { + return stateVal ? 
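addClass and removeClass above both tokenize their argument on whitespace and only write `class` back when the collapsed value actually changed; toggleClass, continuing below, delegates to them when given an explicit boolean. Usage sketch (selector and class names illustrative):

var isOn = true;   // illustrative state flag
var $item = jQuery( ".item" );

$item.addClass( "active highlight" );   // space-separated list, one attribute write
$item.removeClass( "highlight" );
$item.toggleClass( "active", isOn );    // boolean form routes to add/removeClass

// Function form receives the index and the current class string:
$item.addClass( function( i, current ) {
	return "pos-" + i;
} );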
this.addClass( value ) : this.removeClass( value ); + } + + if ( jQuery.isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( type === "string" ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = value.match( rnothtmlwhite ) || []; + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, isFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
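`.val()` above stringifies numbers, maps arrays, and defers to `valHooks`; the option/select hooks that follow are what make multi-selects round-trip arrays. A sketch, assuming `<select id="tags" multiple>` with option values a/b/c (illustrative):

jQuery( "#tags" ).val( [ "a", "c" ] );   // valHooks.select.set selects two options
jQuery( "#tags" ).val();                 // => [ "a", "c" ] via valHooks.select.get

// Radios/checkboxes: an array argument checks the input when its value matches.
jQuery( "input[name=color]" ).val( [ "red" ] );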
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 
2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
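Everything above is the bubbling pass of `trigger`; the branch continuing below fires the matching native method unless default was prevented. For orientation (selector and namespace illustrative):

jQuery( "#save" ).on( "click.autosave", function( e ) {
	console.log( "namespace:", e.namespace );   // "autosave"
} );

jQuery( "#save" ).trigger( "click.autosave" );  // bubbles, then runs native click()
jQuery( "#save" ).triggerHandler( "click" );    // handlers only: no bubbling, no default action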
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup contextmenu" ).split( " " ), + function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + return arguments.length > 0 ? + this.on( name, null, data, fn ) : + this.trigger( name ); + }; +} ); + +jQuery.fn.extend( { + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +} ); + + + + +support.focusin = "onfocusin" in window; + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = jQuery.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && jQuery.type( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = jQuery.isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
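buildParams and jQuery.param above encode nested structures with bracketed keys unless `traditional` is set. Illustrative inputs, with output shown percent-decoded for readability:

jQuery.param( { a: [ 1, 2 ], b: { c: 3 } } );
// => "a[]=1&a[]=2&b[c]=3"  (sent percent-encoded: a%5B%5D=1...)

jQuery.param( { a: [ 1, 2 ] }, true );
// => "a=1&a=2"  (traditional: flat keys, no brackets)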
jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( jQuery.isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport 
); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + 
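addToPrefiltersOrTransports above is the factory behind both public registration APIs; a prefilter may mutate the options or redirect to another dataType by returning its name, which makes `inspect` restart. A sketch (header name and rerouting logic illustrative):

// Runs before a transport is chosen, most-specific dataType first:
jQuery.ajaxPrefilter( "json", function( options, originalOptions, jqXHR ) {
	jqXHR.setRequestHeader( "X-Requested-From", "docs" );   // illustrative header
	if ( options.crossDomain ) {
		return "jsonp";   // reroute: inspect() restarts with the new dataType
	}
} );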
// If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
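The converters table above is keyed "source target" and chained by ajaxConvert, so a custom dataType can piggyback on the built-in "text json" step. A sketch (dataType name and envelope shape illustrative):

jQuery.ajaxSetup( {
	converters: {
		"json payload": function( data ) {
			return data.payload;   // unwrap an envelope; shape is illustrative
		}
	}
} );

// dataType "json payload" now converts text -> json -> payload:
// jQuery.ajax( { url: "/api/items", dataType: "json payload" } );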
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match == null ? null : match; + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
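The fake jqXHR above parses response headers lazily and supports a `statusCode` map whose callbacks fire per HTTP status. Orientation sketch (URL illustrative):

var jqxhr = jQuery.ajax( {
	url: "/api/items",   // illustrative endpoint
	statusCode: {
		404: function() {
			console.log( "missing" );
		}
	}
} );

jqxhr.always( function() {
	// Headers become readable once the request completes:
	console.log( jqxhr.getResponseHeader( "Content-Type" ) );
} );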
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 13 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available, append data to url + if ( s.data ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
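The block above appends the `_=` anti-cache parameter and replays stored validators when `ifModified` is on; both are per-request knobs (URL illustrative):

jQuery.ajax( {
	url: "/feed",      // illustrative endpoint
	cache: false,      // forces a fresh "_=<timestamp>" param on each request
	ifModified: true,  // sends If-Modified-Since / If-None-Match when known
	success: function( data, status ) {
		// status is "notmodified" on a 304; data is then undefined
	}
} );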
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + "throws": true + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( jQuery.isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + 
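The loop above stamps out jQuery.get and jQuery.post with argument shifting, and getJSON layers on top of get, so the two calls below are equivalent (URL and params illustrative):

jQuery.getJSON( "/api/items", { q: "books" }, function( data ) {
	console.log( data );
} );

// ...expands to exactly:
jQuery.ajax( {
	url: "/api/items",
	type: "get",        // uppercased to GET later in ajax()
	dataType: "json",
	data: { q: "books" },
	success: function( data ) {
		console.log( data );
	}
} );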
contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. + if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Advanced tools

+

This section will briefly present the submodule moviepy.video.tools, which can help you edit videos. It is not finished yet; in the meantime, see the reference page video.tools (the same content, but more complete and more technical).

+
+

Tracking

+
+
+

Cuts

+
+
+

Subtitles

+
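Until this page is finished, here is a minimal sketch of the subtitles tool using the v1 API; the subtitles.srt file name and the font are placeholder choices:

from moviepy.editor import TextClip, VideoFileClip, CompositeVideoClip
from moviepy.video.tools.subtitles import SubtitlesClip

# How each subtitle line is turned into a clip
generator = lambda txt: TextClip(txt, font='Georgia-Regular',
                                 fontsize=24, color='white')

subs = SubtitlesClip("subtitles.srt", generator)
video = VideoFileClip("myvideo.mp4")
result = CompositeVideoClip([video, subs.set_position(('center', 'bottom'))])
result.write_videofile("subtitled.mp4", fps=video.fps)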
+
+

Credits

+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/docker.html b/v1.0.3/docker.html new file mode 100644 index 000000000..df00cec36 --- /dev/null +++ b/v1.0.3/docker.html @@ -0,0 +1,267 @@ + + + + + + + + + + Moviepy Docker — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Moviepy Docker

+
+

Prerequisites

+
    +
  1. Docker installed (Docker for Mac, Docker for Windows, Linux, etc.)

  2. Build the Dockerfile:

    docker build -t moviepy -f Dockerfile .
+
+
+

Steps to run the git repo unittests from docker

+

Get a bash prompt in the moviepy container

+
cd tests
+docker run -it -v `pwd`:/tests moviepy bash
+
+
+

Run the tests

+
cd tests
+python test_issues.py
+
+
+
+
+

Running your own moviepy script from docker

+

Change directory to where your script is located

+

If the moviepy Docker container is already running, you can connect to it with:

+
docker exec -it moviepy python myscript.py
+
+
+

If the container isn’t running already

+
docker run -it moviepy bash
+python myscript.py
+
+
+

You can also start a container and run a script in one command:

+
docker run -it -v `pwd`:/code moviepy python myscript.py
+
+
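If you just want to check that the container works, myscript.py can be as small as the following (a hypothetical test script, not part of the repository):

# myscript.py -- minimal sanity check for the container
from moviepy.editor import ColorClip

# A 2-second red clip, written with the ffmpeg bundled in the container
clip = ColorClip(size=(320, 240), color=(255, 0, 0), duration=2)
clip.write_videofile("red.mp4", fps=24)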
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/compo_from_image.html b/v1.0.3/examples/compo_from_image.html new file mode 100644 index 000000000..c8fd28383 --- /dev/null +++ b/v1.0.3/examples/compo_from_image.html @@ -0,0 +1,278 @@ + + + + + + + + + + Placing clips according to a picture — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Placing clips according to a picture

+

So how do you do some complex compositing like this ?

+

It takes a lot of bad taste, and a segmenting tool

+

In this script we will use this image (generated with Inkscape):

+
+../_images/compo_from_image.jpeg +
+

We will find the regions of this image and fit the different clips into these regions:

+
from moviepy.editor import *
+from moviepy.video.tools.segmenting import findObjects
+
+# Load the image specifying the regions.
+im = ImageClip("../../ultracompositing/motif.png")
+
+# Locate the regions; returns a list of ImageClips
+regions = findObjects(im)
+
+
+# Load 7 clips from the US National Parks. Public Domain :D
+clips = [VideoFileClip(n, audio=False).subclip(18,22) for n in
+     [ "../../videos/romo_0004.mov",
+      "../../videos/apis-0001.mov",
+      "../../videos/romo_0001.mov",
+      "../../videos/elma_s0003.mov",
+      "../../videos/elma_s0002.mov",
+      "../../videos/calo-0007.mov",
+      "../../videos/grsm_0005.mov"]]
+
+# fit each clip into its region
+comp_clips =  [c.resize(r.size)
+                .set_mask(r.mask)
+                .set_pos(r.screenpos)
+               for c,r in zip(clips,regions)]
+
+cc = CompositeVideoClip(comp_clips,im.size)
+cc.resize(0.6).write_videofile("../../composition.mp4")
+
+# Note that this particular composition takes quite a long time to
+# render (about 20s on my computer for just 4s of video).
+
+
+

(Note that some pictures are distorted here, as they have been resized without regard for their aspect ratio. This could be fixed with a few more lines, as sketched below.)

+
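A possible fix, as a sketch only: fit_preserving_ratio below is a hypothetical helper, and note that it drops the region masks, since a mask cut out for the full region no longer matches the resized clip:

def fit_preserving_ratio(clip, region):
    """Scale `clip` to fit inside `region` without distortion, centered."""
    scale = min(region.w / clip.w, region.h / clip.h)  # largest scale that fits
    resized = clip.resize(scale)
    x, y = region.screenpos
    # center the resized clip inside the region's bounding box
    return resized.set_position((x + (region.w - resized.w) // 2,
                                 y + (region.h - resized.h) // 2))

comp_clips = [fit_preserving_ratio(c, r) for c, r in zip(clips, regions)]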
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/dancing_knights.html b/v1.0.3/examples/dancing_knights.html new file mode 100644 index 000000000..ef422dc0a --- /dev/null +++ b/v1.0.3/examples/dancing_knights.html @@ -0,0 +1,364 @@ + + + + + + + + + + A reconstitution of 15th century dancing — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

A reconstitution of 15th century dancing

+

And now for something very silly…

+
+ +
# -*- coding: utf-8 -*-
+
+"""
+Result: https://www.youtube.com/watch?v=Qu7HJrsEYFg
+
+This is how we can imagine knights dancing in the 15th century, based on a very
+serious historical study here: https://www.youtube.com/watch?v=zvCvOC2VwDc
+
+Here is what we do:
+
+0- Get the video of a dancing knight, and a (Creative Commons) audio music file.
+1- load the audio file and automatically find the tempo
+2- load the video and automatically find a segment that loops well
+3- extract this segment, slow it down so that it matches the audio tempo,
+   and make it loop forever.
+4- Symmetrize this segment so that we will get two knights instead of one
+5- Add a title screen and some credits, write to a file.
+
+
+This example has been originally edited in an IPython Notebook, which makes it
+easy to preview and fine-tune each part of the editing.
+"""
+
+import os
+
+from moviepy.audio.tools.cuts import find_audio_period
+from moviepy.editor import *
+from moviepy.video.tools.cuts import find_video_period
+
+# Next lines are for downloading the required videos from Youtube.
+# To do this you must have youtube-dl installed, otherwise you will need to
+# download the videos by hand and rename them, as follows:
+#     https://www.youtube.com/watch?v=zvCvOC2VwDc => knights.mp4
+#     https://www.youtube.com/watch?v=lkY3Ek9VPtg => frontier.mp4
+
+if not os.path.exists("knights.mp4"):
+    os.system("youtube-dl zvCvOC2VwDc -o knights.mp4")
+    os.system("youtube-dl lkY3Ek9VPtg -o frontier.mp4")
+#==========
+
+
+# LOAD, EDIT, ANALYZE THE AUDIO
+
+
+audio = (AudioFileClip("frontier.mp4")
+         .subclip((4,7), (4,18))
+         .audio_fadein(1)
+         .audio_fadeout(1))
+
+audio_period = find_audio_period(audio)
+print ('Analyzed the audio, found a period of %.02f seconds'%audio_period)
+
+
+# LOAD, EDIT, ANALYZE THE VIDEO
+
+
+clip = (VideoFileClip("./knights.mp4", audio=False)
+        .subclip((1,24.15),(1,26))
+        .crop(x1=332, x2=910, y2=686))
+
+video_period = find_video_period(clip, tmin=.3)
+print ('Analyzed the video, found a period of %.02f seconds'%video_period)
+
+edited_right = (clip.subclip(0,video_period)
+                .speedx(final_duration=2*audio_period)
+                .fx(vfx.loop, duration=audio.duration)
+                .subclip(.25))
+
+edited_left = edited_right.fx(vfx.mirror_x)
+
+dancing_knights = (clips_array([[edited_left, edited_right]])
+                   .fadein(1).fadeout(1).set_audio(audio).subclip(.3))
+
+# MAKE THE TITLE SCREEN
+
+
+txt_title = (TextClip("15th century dancing\n(hypothetical)", fontsize=70,
+               font="Century-Schoolbook-Roman", color="white")
+             .margin(top=15, opacity=0)
+             .set_position(("center","top")))
+
+title = (CompositeVideoClip([dancing_knights.to_ImageClip(), txt_title])
+         .fadein(.5)
+         .set_duration(3.5))
+
+
+# MAKE THE CREDITS SCREEN
+
+
+txt_credits = """
+CREDITS
+
+Video excerpt: Le combat en armure au XVe siècle
+By J. Donzé, D. Jaquet, T. Schmuziger,
+Université de Genève, Musée National de Moyen Age
+
+Music: "Frontier", by DOCTOR VOX
+Under licence Creative Commons
+https://www.youtube.com/user/DOCTORVOXofficial
+
+Video editing © Zulko 2014
+ Licence Creative Commons (CC BY 4.0)
+Edited with MoviePy: http://zulko.github.io/moviepy/
+"""
+
+credits = (TextClip(txt_credits, color='white',
+            font="Century-Schoolbook-Roman", fontsize=35, kerning=-2,
+            interline=-1, bg_color='black', size=title.size)
+          .set_duration(2.5)
+          .fadein(.5)
+          .fadeout(.5))
+
+
+# ASSEMBLE EVERYTHING, WRITE TO FILE
+
+final = concatenate_videoclips([title, dancing_knights, credits])
+
+final.write_videofile("dancing_knights.mp4", fps=clip.fps,
+                      audio_bitrate="1000k", bitrate="4000k")
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/example_with_sound.html b/v1.0.3/examples/example_with_sound.html new file mode 100644 index 000000000..197256270 --- /dev/null +++ b/v1.0.3/examples/example_with_sound.html @@ -0,0 +1,300 @@ + + + + + + + + + + An example with sound — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

An example with sound

+

An example of using MoviePy to assemble movie clips with sounds. Here are two scenes of Charade put together:

+

Here is the code:

+
"""
+Description of the video:
+The screen is split in two parts showing Cary and Audrey on the phone,
+talking at the same time, because it is actually two scenes of the same
+movie put together.
+"""
+
+from moviepy.editor import *
+from moviepy.video.tools.drawing import color_split
+
+duration = 6 # duration of the final clip
+
+# LOAD THE MAIN SCENE
+# this small video contains the two scenes that we will put together.
+
+main_clip = VideoFileClip("../../videos/charadePhone.mp4")
+W,H = main_clip.size
+
+
+
+# MAKE THE LEFT CLIP : cut, crop, add a mask 
+                            
+mask = color_split((2*W/3,H),
+                   p1=(W/3,H), p2=(2*W/3,0),
+                   col1=1, col2=0,
+                   grad_width=2)
+                   
+mask_clip = ImageClip(mask, ismask=True)
+                   
+clip_left = (main_clip.coreader()
+                      .subclip(0,duration)
+                      .crop( x1=60, x2=60 + 2*W/3)
+                      .set_mask(mask_clip))
+
+
+# MAKE THE RIGHT CLIP : cut, crop, add a mask 
+                   
+mask = color_split((2*W/3,H),
+                   p1=(2,H), p2=(W/3+2,0),
+                   col1=0, col2=1,
+                   grad_width=2)
+
+mask_clip = ImageClip(mask, ismask=True)
+
+clip_right = (main_clip.coreader()
+                       .subclip(21,21+duration)
+                       .crop(x1=70, x2=70+2*W/3)
+                       .set_mask(mask_clip))
+
+
+
+
+# ASSEMBLE AND WRITE THE MOVIE TO A FILE
+
+cc = CompositeVideoClip([clip_right.set_pos('right').volumex(0.4),
+                         clip_left.set_pos('left').volumex(0.4)],
+                         size = (W,H))
+#cc.preview()
+cc.write_videofile("../../biphone3.avi",fps=24, codec='mpeg4')
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/examples.html b/v1.0.3/examples/examples.html new file mode 100644 index 000000000..2ac205f1a --- /dev/null +++ b/v1.0.3/examples/examples.html @@ -0,0 +1,253 @@ + + + + + + + + + + Example Scripts — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+ + +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/headblur.html b/v1.0.3/examples/headblur.html new file mode 100644 index 000000000..dcdf70d84 --- /dev/null +++ b/v1.0.3/examples/headblur.html @@ -0,0 +1,291 @@ + + + + + + + + + + Tracking and blurring someone’s face — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Tracking and blurring someone’s face

+

First we will need to track the face, i.e. to get two functions fx and fy such that (fx(t),fy(t)) gives the position of the center of the head at time t. This will be easily done with +manual_tracking. Then we will need to blur the area of the video around the center of the head.

+
import pickle
+
+from moviepy.editor import *
+from moviepy.video.tools.tracking import manual_tracking, to_fxfy
+
+# LOAD THE CLIP (subclip 6'51 - 7'01 of a chaplin movie)
+clip = VideoFileClip("../../videos/chaplin.mp4").subclip((6,51.7),(7,01.3))
+
+# MANUAL TRACKING OF THE HEAD
+
+# the three next lines are for the manual tracking and its saving
+# to a file, it must be commented once the tracking has been done
+# (after the first run of the script for instance).
+# Note that we save the list (ti,xi,yi), not the functions fx and fy
+# (that we will need) because they have dependencies.
+
+#txy, (fx,fy) = manual_tracking(clip, fps=6)
+#with open("../../chaplin_txy.dat",'wb+') as f:
+#    pickle.dump(txy, f)
+
+
+
+# IF THE MANUAL TRACKING HAS BEEN PREVIOUSLY DONE,
+# LOAD THE TRACKING DATA AND CONVERT IT TO FUNCTIONS fx(t), fy(t)
+
+with open("../../chaplin_txy.dat",'rb') as f:
+    fx,fy = to_fxfy( pickle.load(f) )
+
+
+# BLUR CHAPLIN'S HEAD IN THE CLIP
+
+clip_blurred = clip.fx( vfx.headblur, fx, fy, 25)
+
+
+# Generate the text, put in on a grey background
+
+txt = TextClip("Hey you ! \n You're blurry!", color='grey70',
+               size = clip.size, bg_color='grey20',
+               font = "Century-Schoolbook-Italic", fontsize=40)
+               
+               
+# Concatenate the Chaplin clip with the text clip, add audio
+
+final = concatenate_videoclips([clip_blurred,txt.set_duration(3)]).\
+          set_audio(clip.audio)
+
+# We write the result to a file. Here we raise the bitrate so that
+# the final video is not too ugly.
+
+final.write_videofile('../../blurredChaplin.avi', bitrate="3000k")
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/logo.html b/v1.0.3/examples/logo.html new file mode 100644 index 000000000..8341bc82b --- /dev/null +++ b/v1.0.3/examples/logo.html @@ -0,0 +1,238 @@ + + + + + + + + + + MoviePy logo with a moving shadow — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

MoviePy logo with a moving shadow

+

Here the logo is a picture, while the shadow is actually a black rectangle taking the whole screen, overlaid over the logo, but with a moving mask composed of a bi-gradient, such that only one (moving) part of the rectangle is visible.

+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/masked_credits.html b/v1.0.3/examples/masked_credits.html new file mode 100644 index 000000000..0378241d1 --- /dev/null +++ b/v1.0.3/examples/masked_credits.html @@ -0,0 +1,268 @@ + + + + + + + + + + Partially Hidden credits — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Partially Hidden credits

+

Before seeing the code for this video, here is a tutorial video that explains the different steps (also made with MoviePy):

+

And here is the code:

+
from moviepy.editor import *
+from moviepy.video.tools.credits import credits1
+
+# Load the mountains clip, cut it, slow it down, make it look darker
+clip = (VideoFileClip('../../videos/badl-0001.mov', audio=False)
+           .subclip(37,46)
+           .speedx( 0.4)
+           .fx( vfx.colorx, 0.7))
+
+# Save the first frame to later make a mask with GIMP (only once)
+#~ clip.save_frame('../../credits/mountainMask2.png')
+
+
+# Load the mountain mask made with GIMP
+mountainmask = ImageClip('../../credits/mountainMask2.png',ismask=True)
+
+# Generate the credits from a text file
+credits = credits1('../../credits/credits.txt',3*clip.w/4)
+scrolling_credits = credits.set_pos(lambda t:('center',-10*t))
+
+
+# Make the credits scroll. Here, 10 pixels per second
+final = CompositeVideoClip([clip,
+                            scrolling_credits,
+                            clip.set_mask(mountainmask)])
+                            
+final.subclip(8,10).write_videofile("../../credits_mountains.avi")
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/moving_letters.html b/v1.0.3/examples/moving_letters.html new file mode 100644 index 000000000..d18dc35e3 --- /dev/null +++ b/v1.0.3/examples/moving_letters.html @@ -0,0 +1,306 @@ + + + + + + + + + + Text with moving letters — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Text with moving letters

+

I think this example nicely illustrates the value of script-based editing (imagine doing that by hand).

+

Here is the code:

+
import numpy as np
+
+from moviepy.editor import *
+from moviepy.video.tools.segmenting import findObjects
+
+# WE CREATE THE TEXT THAT IS GOING TO MOVE, WE CENTER IT.
+
+screensize = (720,460)
+txtClip = TextClip('Cool effect',color='white', font="Amiri-Bold",
+                   kerning = 5, fontsize=100)
+cvc = CompositeVideoClip( [txtClip.set_pos('center')],
+                        size=screensize)
+
+# THE NEXT FOUR FUNCTIONS DEFINE FOUR WAYS OF MOVING THE LETTERS
+
+
+# helper function
+rotMatrix = lambda a: np.array( [[np.cos(a),np.sin(a)], 
+                                 [-np.sin(a),np.cos(a)]] )
+
+def vortex(screenpos,i,nletters):
+    d = lambda t : 1.0/(0.3+t**8) #damping
+    a = i*np.pi/ nletters # angle of the movement
+    v = rotMatrix(a).dot([-1,0])
+    if i%2 : v[1] = -v[1]
+    return lambda t: screenpos+400*d(t)*rotMatrix(0.5*d(t)*a).dot(v)
+    
+def cascade(screenpos,i,nletters):
+    v = np.array([0,-1])
+    d = lambda t : 1 if t<0 else abs(np.sinc(t)/(1+t**4))
+    return lambda t: screenpos+v*400*d(t-0.15*i)
+
+def arrive(screenpos,i,nletters):
+    v = np.array([-1,0])
+    d = lambda t : max(0, 3-3*t)
+    return lambda t: screenpos-400*v*d(t-0.2*i)
+    
+def vortexout(screenpos,i,nletters):
+    d = lambda t : max(0,t) #damping
+    a = i*np.pi/ nletters # angle of the movement
+    v = rotMatrix(a).dot([-1,0])
+    if i%2 : v[1] = -v[1]
+    return lambda t: screenpos+400*d(t-0.1*i)*rotMatrix(-0.2*d(t)*a).dot(v)
+
+
+
+# WE USE THE PLUGIN findObjects TO LOCATE AND SEPARATE EACH LETTER
+
+letters = findObjects(cvc) # a list of ImageClips
+
+
+# WE ANIMATE THE LETTERS
+
+def moveLetters(letters, funcpos):
+    return [ letter.set_pos(funcpos(letter.screenpos,i,len(letters)))
+              for i,letter in enumerate(letters)]
+
+clips = [ CompositeVideoClip( moveLetters(letters,funcpos),
+                              size = screensize).subclip(0,5)
+          for funcpos in [vortex, cascade, arrive, vortexout] ]
+
+# WE CONCATENATE EVERYTHING AND WRITE TO A FILE
+
+final_clip = concatenate_videoclips(clips)
+final_clip.write_videofile('../../coolTextEffects.avi',fps=25,codec='mpeg4')
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/painting_effect.html b/v1.0.3/examples/painting_effect.html new file mode 100644 index 000000000..c00544b31 --- /dev/null +++ b/v1.0.3/examples/painting_effect.html @@ -0,0 +1,298 @@ + + + + + + + + + + Freezing a movie frame with a painting effect — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Freezing a movie frame with a painting effect

+

That’s an effect that we have seen a lot in westerns and such.

+

The recipe used to make a photo look like a painting:

+
    +
  • Find the edges of the image with the Sobel algorithm. You obtain +what looks like a black and white hand-drawing of the photo.
  • +
  • Multiply the image array to make the colors flashier, and add the contours +obtained at the previous step.
  • +
+

The final clip will be the concatenation of three parts: the part before +the effect, the part with the effect, and the part after the effect. +The part with the effect is obtained as follows:

+
    +
  • Take the frame to freeze and make a “painted image” of it. Make it a clip.
  • +
  • Add a text clip saying “Audrey” to the “painted image” clip.
  • +
  • Overlay the painted clip over the original frame, but make it appear and +disappear with a fading effect.
  • +
+

Here you are for the code:

+
""" requires scikit-image installed (for vfx.painting) """
+ 
+from moviepy.editor import *
+
+# WE TAKE THE SUBCLIPS WHICH ARE 2 SECONDS BEFORE & AFTER THE FREEZE
+
+charade = VideoFileClip("../../videos/charade.mp4")
+tfreeze = cvsecs(19.21) # Time of the freeze, 19'21
+
+# when using several subclips of a same clip, it can be faster
+# to create 'coreaders' of the clip (=other entrance points).
+clip_before = charade.coreader().subclip(tfreeze -2,tfreeze)
+clip_after = charade.coreader().subclip(tfreeze ,tfreeze +2)
+
+
+# THE FRAME TO FREEZE
+
+im_freeze = charade.to_ImageClip(tfreeze)
+painting = (charade.fx( vfx.painting, saturation = 1.6,black = 0.006)
+                   .to_ImageClip(tfreeze))
+                 
+txt = TextClip('Audrey',font='Amiri-regular',fontsize=35)
+
+painting_txt = (CompositeVideoClip([painting,txt.set_pos((10,180))])
+                   .add_mask()
+                   .set_duration(3)
+                   .crossfadein( 0.5)
+                   .crossfadeout( 0.5))
+
+# FADEIN/FADEOUT EFFECT ON THE PAINTED IMAGE
+
+painting_fading = CompositeVideoClip([im_freeze,painting_txt])
+
+# FINAL CLIP AND RENDERING
+
+final_clip =  concatenate_videoclips([ clip_before,
+                            painting_fading.set_duration(3),
+                            clip_after])
+
+final_clip.write_videofile('../../audrey.avi',fps=charade.fps,
+                        codec = "mpeg4", audio_bitrate="3000k")
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/quick_recipes.html b/v1.0.3/examples/quick_recipes.html new file mode 100644 index 000000000..162dcaf00 --- /dev/null +++ b/v1.0.3/examples/quick_recipes.html @@ -0,0 +1,308 @@ + + + + + + + + + + Quick recipes — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Quick recipes

+
+

Effects and filters

+
+

Blurring all frames of a video

+
from skimage.filters import gaussian
+from moviepy.editor import VideoFileClip
+
+def blur(image):
+    """ Returns a blurred (radius=2 pixels) version of the image """
+    return gaussian_filter(image.astype(float), sigma=2)
+
+clip = VideoFileClip("my_video.mp4")
+clip_blurred = clip.fl_image( blur )
+clip_blurred.write_videofile("blurred_video.mp4")
+
+
+
+
+
+

Cutting videos

+
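While this section is being written, here is a minimal sketch of the two core operations, subclip and cutout (file name and times are placeholders):

from moviepy.editor import VideoFileClip

clip = VideoFileClip("my_video.mp4")
middle = clip.subclip(60, 70)    # keep only t=60s..70s
shorter = clip.cutout(60, 70)    # remove t=60s..70s and join the remainder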
+
+

Scene detection

+
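A sketch using the luminosity-based detector from moviepy.video.tools.cuts, assuming the v1 API (fps and thr are tuning knobs to adapt to your footage):

from moviepy.editor import VideoFileClip
from moviepy.video.tools.cuts import detect_scenes

clip = VideoFileClip("my_video.mp4")
cuts, luminosities = detect_scenes(clip, fps=10, thr=10)
# `cuts` is a list of (start, end) couples, one per detected scene.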
+
+

Compositing videos

+
+

Add a title before a video

+
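A minimal sketch (the title text, duration and file names are placeholders):

from moviepy.editor import TextClip, VideoFileClip, concatenate_videoclips

video = VideoFileClip("my_video.mp4")
title = (TextClip("My Holidays 2013", fontsize=70, color='white')
         .on_color(size=video.size, color=(0, 0, 0))  # full-frame black card
         .set_duration(3))
final = concatenate_videoclips([title, video])
final.write_videofile("with_title.mp4", fps=video.fps)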
+
+
+

Art of Gif-making

+
+

clip.fx( vfx.time_symmetrize)

+
+
# find a subclip +T = clip
+
+
+
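For instance, a classic looping-GIF recipe assembled from the pieces above (file name and times are placeholders):

from moviepy.editor import VideoFileClip, vfx

clip = (VideoFileClip("my_video.mp4")
        .subclip(10, 13)            # find a short subclip
        .resize(0.3)                # GIFs should stay small
        .fx(vfx.time_symmetrize))   # forwards then backwards: a seamless loop
clip.write_gif("loop.gif", fps=12)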
+

Useless but fun

+
+

Getting the average frame of a video

+
from moviepy.editor import VideoFileClip, ImageClip
+clip = VideoFileClip("video.mp4")
+fps= 1.0 # take one frame per second
+nframes = clip.duration*fps # total number of frames used
+total_image = sum(clip.iter_frames(fps,dtype=float,logger='bar'))
+average_image = ImageClip(total_image/ nframes)
+average_image.save_frame("average_test.png")
+
+
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/several_characters.html b/v1.0.3/examples/several_characters.html new file mode 100644 index 000000000..99c866d5a --- /dev/null +++ b/v1.0.3/examples/several_characters.html @@ -0,0 +1,238 @@ + + + + + + + + + + Character duplication in a video — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Character duplication in a video

+

So blabla

+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/star_worms.html b/v1.0.3/examples/star_worms.html new file mode 100644 index 000000000..40010746e --- /dev/null +++ b/v1.0.3/examples/star_worms.html @@ -0,0 +1,421 @@ + + + + + + + + + + A Star-Wars like opening title — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

A Star-Wars like opening title

+

This is an approximate effect (the perspective would require some more complex transformations) but it is a nice exercise.

+

Warning: clip with sound.

+

Let us also have a look at this tutorial which shows the different steps:

+

And here you are for the code, and for the code of the tutorial.

+
"""
+Description of the video:
+Mimic of Star Wars' opening title. A text with a (false)
+perspective effect goes towards the end of space, on a
+background made of stars. Slight fading effect on the text.
+
+"""
+
+import numpy as np
+
+from moviepy.editor import *
+from moviepy.video.tools.drawing import color_gradient
+from skimage import transform as tf
+
+# RESOLUTION
+
+w = 720
+h = int(w*9/16) # 16/9 screen (int: h is later used as a slice index)
+moviesize = w,h
+
+
+
+# THE RAW TEXT
+txt = "\n".join([
+"A long time ago, in a faraway galaxy,",
+"there lived a prince and a princess",
+"who had never seen the stars, for they",
+"lived deep underground.",
+"",
+"Many years before, the prince's",
+"grandfather had ventured out to the",
+"surface and had been burnt to ashes by",
+"solar winds.",
+"",
+"One day, as the princess was coding",
+"and the prince was shopping online, a",
+"meteor landed just a few megameters",
+"from the couple's flat."
+])
+
+
+# Add blanks
+txt = 10*"\n" +txt + 10*"\n"
+
+
+# CREATE THE TEXT IMAGE
+
+
+clip_txt = TextClip(txt,color='white', align='West',fontsize=25,
+                    font='Xolonium-Bold', method='label')
+
+
+# SCROLL THE TEXT IMAGE BY CROPPING A MOVING AREA
+
+txt_speed = 27
+fl = lambda gf,t : gf(t)[int(txt_speed*t):int(txt_speed*t)+h,:]
+moving_txt= clip_txt.fl(fl, apply_to=['mask'])
+
+
+# ADD A VANISHING EFFECT ON THE TEXT WITH A GRADIENT MASK
+
+grad = color_gradient(moving_txt.size,p1=(0,2*h/3),
+                p2=(0,h/4),col1=0.0,col2=1.0)
+gradmask = ImageClip(grad,ismask=True)
+fl = lambda pic : np.minimum(pic,gradmask.img)
+moving_txt.mask = moving_txt.mask.fl_image(fl)
+
+
+# WARP THE TEXT INTO A TRAPEZOID (PERSPECTIVE EFFECT)
+
+def trapzWarp(pic,cx,cy,ismask=False):
+    """ Complicated function (will be latex packaged as a fx) """
+    Y,X = pic.shape[:2]
+    src = np.array([[0,0],[X,0],[X,Y],[0,Y]])
+    dst = np.array([[cx*X,cy*Y],[(1-cx)*X,cy*Y],[X,Y],[0,Y]])
+    tform = tf.ProjectiveTransform()
+    tform.estimate(src,dst)
+    im = tf.warp(pic, tform.inverse, output_shape=(Y,X))
+    return im if ismask else (im*255).astype('uint8')
+
+fl_im = lambda pic : trapzWarp(pic,0.2,0.3)
+fl_mask = lambda pic : trapzWarp(pic,0.2,0.3, ismask=True)
+warped_txt= moving_txt.fl_image(fl_im)
+warped_txt.mask = warped_txt.mask.fl_image(fl_mask)
+
+
+# BACKGROUND IMAGE, DARKENED AT 60%
+
+stars = ImageClip('../../videos/stars.jpg')
+stars_darkened = stars.fl_image(lambda pic: (0.6*pic).astype('int16'))
+
+
+# COMPOSE THE MOVIE
+
+final = CompositeVideoClip([
+         stars_darkened,
+         warped_txt.set_pos(('center','bottom'))],
+         size = moviesize)
+
+
+# WRITE TO A FILE
+
+final.set_duration(8).write_videofile("starworms.avi", fps=5)
+
+# This script is heavy (30s of computations to render 8s of video)
+
+
+
+"""=====================================================================
+
+    CODE FOR THE VIDEO TUTORIAL
+
+  We will now code the video tutorial for this video.
+  When you think about it, it is a code for a video explaining how to
+  make another video using some code (this is so meta !).
+  This code uses the variables of the previous code (it should be placed
+  after that previous code to work).
+
+====================================================================="""
+
+
+
+def annotate(clip,txt,txt_color='white',bg_color=(0,0,255)):
+    """ Writes a text at the bottom of the clip. """
+    
+    txtclip = TextClip(txt, fontsize=20, font='Ubuntu-bold',
+                       color=txt_color)
+                       
+    txtclip = txtclip.on_color((clip.w,txtclip.h+6), color=(0,0,255),
+                        pos=(6,'center'))
+                        
+    cvc =  CompositeVideoClip([clip , txtclip.set_pos((0,'bottom'))])
+    
+    return cvc.set_duration(clip.duration)
+
+
+def resizeCenter(clip):
+    return clip.resize( height=h).set_pos('center')
+
+    
+def composeCenter(clip):
+    return CompositeVideoClip([clip.set_pos('center')],size=moviesize)
+
+
+
+annotated_clips = [ annotate(clip,text) for clip,text in [
+               
+(composeCenter(resizeCenter(stars)).subclip(0,3),
+    "This is a public domain picture of stars"),
+
+(CompositeVideoClip([stars],moviesize).subclip(0,3),
+    "We only keep one part."),
+
+(CompositeVideoClip([stars_darkened],moviesize).subclip(0,3),
+    "We darken it a little."),
+
+(composeCenter(resizeCenter(clip_txt)).subclip(0,3),
+    "We generate a text image."),
+
+(composeCenter(moving_txt.set_mask(None)).subclip(6,9),
+    "We scroll the text by cropping a moving region of it."),
+
+(composeCenter(gradmask.to_RGB()).subclip(0,2),
+    "We add this mask to the clip."),
+
+(composeCenter(moving_txt).subclip(6,9),
+    "Here is the result"),
+
+(composeCenter(warped_txt).subclip(6,9),
+    "We now warp this clip in a trapezoid."),
+
+(final.subclip(6,9),
+    "We finally superimpose with the stars.")
+]]
+
+# Concatenate and write to a file
+
+concatenate_videoclips(annotated_clips).write_videofile('tutorial.avi', fps=5)
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/the_end.html b/v1.0.3/examples/the_end.html new file mode 100644 index 000000000..172df8ffb --- /dev/null +++ b/v1.0.3/examples/the_end.html @@ -0,0 +1,268 @@ + + + + + + + + + + “The End” effect — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

“The End” effect

+

So let’s explain this one: there is a clip with “The End” written in the middle, and above this +clip there is the actual movie. The actual movie has a mask which represents +a white (=opaque) circle on a black (=transparent) background. At the beginning, +that circle is so large that you see all the actual movie and you don’t see +the “The End” clip. Then the circle becomes progressively smaller and as a +consequence you see less of the actual movie and more of the “The End” clip.

+
from moviepy.editor import *
+from moviepy.video.tools.drawing import circle
+
+clip = VideoFileClip("../../videos/badl-0006.mov", audio=False).\
+           subclip(26,31).\
+           add_mask()
+           
+w,h = clip.size
+
+# The mask is a circle with vanishing radius r(t) = 800-200*t               
+clip.mask.get_frame = lambda t: circle(screensize=(clip.w,clip.h),
+                                       center=(clip.w/2,clip.h/4),
+                                       radius=max(0,int(800-200*t)),
+                                       col1=1, col2=0, blur=4)
+
+
+the_end = TextClip("The End", font="Amiri-bold", color="white",
+                   fontsize=70).set_duration(clip.duration)
+
+final = CompositeVideoClip([the_end.set_pos('center'),clip],
+                           size =clip.size)
+                           
+final.write_videofile("../../theEnd.avi")
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/examples/ukulele_concerto.html b/v1.0.3/examples/ukulele_concerto.html new file mode 100644 index 000000000..653783330 --- /dev/null +++ b/v1.0.3/examples/ukulele_concerto.html @@ -0,0 +1,285 @@ + + + + + + + + + + A simple music video — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

A simple music video

+

This is an example, with no sound (lame for a music video), soon to be +replaced with a real music video example (the code will be 99% the same). +The philosophy of MoviePy is that for each new music video I will make, +I will just have to copy/paste this code, and modify a few lines.

+
from moviepy.editor import *
+
+# UKULELE CLIP, OBTAINED BY CUTTING AND CROPPING
+# RAW FOOTAGE
+
+ukulele = VideoFileClip("../../videos/moi_ukulele.MOV", audio=False).\
+               subclip(60+33, 60+50).\
+               crop(486, 180, 1196, 570)
+
+w,h = moviesize = ukulele.size
+
+# THE PIANO FOOTAGE IS DOWNSIZED, HAS A WHITE MARGIN, IS
+# IN THE BOTTOM RIGHT CORNER 
+
+piano = (VideoFileClip("../../videos/douceamb.mp4",audio=False).
+         subclip(30,50).
+         resize((w/3,h/3)).    # one third of the total screen
+         margin( 6,color=(255,255,255)).  #white margin
+         margin( bottom=20, right=20, opacity=0). # transparent
+         set_pos(('right','bottom')) )
+
+
+
+# A CLIP WITH A TEXT AND A BLACK SEMI-OPAQUE BACKGROUND
+
+txt = TextClip("V. Zulkoninov - Ukulele Sonata", font='Amiri-regular',
+	               color='white',fontsize=24)
+
+txt_col = txt.on_color(size=(ukulele.w + txt.w,txt.h-10),
+                  color=(0,0,0), pos=(6,'center'), col_opacity=0.6)
+
+
+# THE TEXT CLIP IS ANIMATED.
+# I am *NOT* explaining the formula; understand it if you can/want.
+txt_mov = txt_col.set_pos( lambda t: (max(w/30,int(w-0.5*w*t)),
+                                  max(5*h/6,int(100*t))) )
+
+
+
+# FINAL ASSEMBLY
+final = CompositeVideoClip([ukulele,txt_mov,piano])
+final.subclip(0,5).write_videofile("../../ukulele.avi",fps=24,codec='libx264')
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/gallery.html b/v1.0.3/gallery.html new file mode 100644 index 000000000..e90580210 --- /dev/null +++ b/v1.0.3/gallery.html @@ -0,0 +1,346 @@ + + + + + + + + + + Gallery — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ + + + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/genindex.html b/v1.0.3/genindex.html new file mode 100644 index 000000000..8cd350c43 --- /dev/null +++ b/v1.0.3/genindex.html @@ -0,0 +1,1314 @@ + + + + + + + + + + + Index — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
    + +
+ + +
+
+
+
+ + +

Index

+ +
+ +
+
+ +
+ +
+

+
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/getting_started/audioclips.html b/v1.0.3/getting_started/audioclips.html new file mode 100644 index 000000000..8d1956546 --- /dev/null +++ b/v1.0.3/getting_started/audioclips.html @@ -0,0 +1,269 @@ + + + + + + + + + + Audio in MoviePy — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Audio in MoviePy

+

This section shows how to use MoviePy to create and edit audio clips.

+

Note that when you cut, mix or concatenate video clips in MoviePy, the audio is handled automatically, so you don’t need to worry about it. This section is of interest if you just want to edit audio files, or if you want custom audio clips for your videos.

+
+

What audioclips are made of

+

AudioClips are very similar to video clips in MoviePy: they have a duration, can be cut and composed the same way, etc. A notable difference is audioclip.get_frame(t), which returns the value(s) of the soundtrack at time t (one float per channel, between -1 and 1) rather than an image; see the short example below.

+
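For instance (a quick sketch; the exact shape of the result depends on the number of channels):

from moviepy.editor import AudioFileClip

audioclip = AudioFileClip("some_audiofile.mp3")
print(audioclip.get_frame(1.5))  # sound value(s) at t=1.5s, e.g. [left, right]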
+
+

Creating a new audio clip

+

Audio clips can be created from an audio file or from the soundtrack of a video file

+
from moviepy.editor import *
+audioclip = AudioFileClip("some_audiofile.mp3")
+audioclip = AudioFileClip("some_video.avi")
+
+
+

for more, see AudioFileClip.

+

Alternatively you can get the audio track of an already created video clip

+
videoclip = VideoFileClip("some_video.avi")
+audioclip = videoclip.audio
+
+
+
+
+

Compositing audio clips

+
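Pending a fuller write-up (see also the “Mixing clips” page), a minimal sketch of mixing two audio clips, the second one starting 5 seconds in:

from moviepy.editor import AudioFileClip, CompositeAudioClip

aclip1 = AudioFileClip("sound1.mp3")
aclip2 = AudioFileClip("sound2.mp3")
mixed = CompositeAudioClip([aclip1, aclip2.set_start(5)])  # aclip2 starts at t=5s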
+
+

Exporting and previewing audio clips

+

You can export an audio clip to a file (see below), or assign it as the soundtrack of a video clip with

+
videoclip2 = videoclip.set_audio(my_audioclip)
+
+
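And to actually export an audio clip to a file, a one-liner (the codec is inferred from the file extension):

audioclip.write_audiofile("my_audio.mp3")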
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/getting_started/compositing.html b/v1.0.3/getting_started/compositing.html new file mode 100644 index 000000000..27d56a182 --- /dev/null +++ b/v1.0.3/getting_started/compositing.html @@ -0,0 +1,348 @@ + + + + + + + + + + Mixing clips — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Mixing clips

+

Video composition, also known as non-linear editing, consists of playing several clips together in a new clip. This video is a good example of what compositing you can do with MoviePy:

+
+ +

Before starting, note that video clips generally carry an audio track and a mask, which are also clips. When you compose these clips together, the soundtrack and mask of the final clip are automatically generated by putting together the soundtracks and masks of the clips. So most of the time you don’t need to worry about mixing the audio and masks.

+
+

Stacking and concatenating clips

+

Two simple ways of putting clips together are to concatenate them (play them one after the other in a single long clip) or to stack them (play them side by side in a single larger clip).

+

Concatenation is done with the function concatenate_videoclips:

+
from moviepy.editor import VideoFileClip, concatenate_videoclips
+clip1 = VideoFileClip("myvideo.mp4")
+clip2 = VideoFileClip("myvideo2.mp4").subclip(50,60)
+clip3 = VideoFileClip("myvideo3.mp4")
+final_clip = concatenate_videoclips([clip1,clip2,clip3])
+final_clip.write_videofile("my_concatenation.mp4")
+
+
+

The final_clip is a clip that plays the clips 1, 2, and 3 one after the other. Note that the clips do not need to be the same size: if they aren’t, they will all appear centered in a clip large enough to contain the biggest of them, optionally with a color of your choosing to fill the borders. You have many other options there (see the documentation of the function); for instance, you can play a transition clip between the clips with the option transition=my_clip, as sketched below.

+
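For instance, a sketch of these options (my_transition_clip stands for any clip of your own you want played between consecutive clips):

final_clip = concatenate_videoclips([clip1, clip2, clip3],
                                    method="compose",    # center clips of different sizes
                                    bg_color=(0, 0, 0),  # fill the borders with black
                                    transition=my_transition_clip)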

Stacking is done with clips_array:

+
from moviepy.editor import VideoFileClip, clips_array, vfx
+clip1 = VideoFileClip("myvideo.mp4").margin(10) # add 10px contour
+clip2 = clip1.fx( vfx.mirror_x)
+clip3 = clip1.fx( vfx.mirror_y)
+clip4 = clip1.resize(0.60) # downsize 60%
+final_clip = clips_array([[clip1, clip2],
+                          [clip3, clip4]])
+final_clip.resize(width=480).write_videofile("my_stack.mp4")
+
+
+

You obtain a clip which looks like this:

+
+../_images/stacked.jpeg +
+
+
+

CompositeVideoClips

+

The CompositeVideoClip class provides a very flexible way to compose clips, but is more complex than concatenate_videoclips and clips_array

+
video = CompositeVideoClip([clip1,clip2,clip3])
+
+
+

Now video plays clip1, and clip2 on top of clip1, and clip3 on top of clip1, and clip2. For instance, if clip2 and clip3 have the same size as clip1, then only clip3, which is on top, will be visible in the video… unless clip3 and clip2 have masks which hide parts of them. Note that by default the composition has the size of its first clip (as it is generally a background). But sometimes you will want to make your clips float in a bigger composition, so you will specify the size of the final composition as follows

+
video = CompositeVideoClip([clip1,clip2,clip3], size=(720,460))
+
+
+
+

Starting and stopping times

+

In a CompositeVideoClip, all the clips start to play at a time that is specified by the clip.start attribute. You can set this starting time as follows:

+
clip1 = clip1.set_start(5) # start after 5 seconds
+
+
+

So for instance your composition will look like

+
video = CompositeVideoClip([clip1, # starts at t=0
+                            clip2.set_start(5), # start at t=5s
+                            clip3.set_start(9)]) # start at t=9s
+
+
+

In the example above, maybe clip2 will start before clip1 is over. In this case you can make clip2 appear with a fade-in effect of one second:

+
video = CompositeVideoClip([clip1, # starts at t=0
+                            clip2.set_start(5).crossfadein(1),
+                            clip3.set_start(9).crossfadein(1.5)])
+
+
+
+
+

Positioning clips

+

If clip2 and clip3 are smaller than clip1, you can decide where they will appear in the composition by setting their position. Here we indicate the coordinates of the top-left pixel of the clips:

+
video = CompositeVideoClip([clip1,
+                           clip2.set_position((45,150)),
+                           clip3.set_position((90,100))])
+
+
+

There are many ways to specify the position:

+
clip2.set_position((45,150)) # x=45, y=150 , in pixels
+
+clip2.set_position("center") # automatically centered
+
+# clip2 is horizontally centered, and at the top of the picture
+clip2.set_position(("center","top"))
+
+# clip2 is vertically centered, at the left of the picture
+clip2.set_position(("left","center"))
+
+# clip2 is at 40% of the width, 70% of the height of the screen:
+clip2.set_position((0.4,0.7), relative=True)
+
+# clip2's position is horizontally centered, and moving down !
+clip2.set_position(lambda t: ('center', 50+t) )
+
+
+

When indicating the position keep in mind that the y coordinate has its zero at the top of the picture:

+
+../_images/videoWH.jpeg +
+
+

Compositing audio clips

+

When you mix video clips together, MoviePy will automatically compose their respective audio tracks to form the audio track of the final clip, so you don’t need to worry about compositing these tracks yourself.

+

If you want to make a custom audio track from several audio sources, audio clips can be mixed together with CompositeAudioClip and concatenate_audioclips:

+
from moviepy.editor import *
+# ... make some audio clips aclip1, aclip2, aclip3
+concat = concatenate_audioclips([aclip1, aclip2, aclip3])
+compo = CompositeAudioClip([aclip1.volumex(1.2),
+                            aclip2.set_start(5), # start at t=5s
+                            aclip3.set_start(9)])
+
+
+
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/getting_started/effects.html b/v1.0.3/getting_started/effects.html new file mode 100644 index 000000000..161c12c19 --- /dev/null +++ b/v1.0.3/getting_started/effects.html @@ -0,0 +1,326 @@ + + + + + + + + + + Clips transformations and effects — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Clips transformations and effects

+

There are several categories of clip modifications in MoviePy:

+
    +
  • The very common methods to change the attributes of a clip: clip.set_duration, clip.set_audio, clip.set_mask, clip.set_start etc.
  • +
  • The already-implemented effects. Core effects like clip.subclip(t1, t2) (keep only the cut between t1 and t2), which are very important, are implemented as class methods. More advanced and less common effects like loop (makes the clip play in a loop) or time_mirror (makes the clip play backwards) are placed in the special modules moviepy.video.fx and moviepy.audio.fx and are applied with the clip.fx method, for instance clip.fx(time_mirror) (makes the clip play backwards), clip.fx(black_white) (turns the clip black and white), etc.
  • +
  • The effects that you can create yourself, using clip.fl and its variants (see “Methods to create custom effects” below).
  • +
+

All these effects have in common that they are not inplace: they do NOT modify the original clip, instead they create a new clip that is a version of the former with the changes applied. For instance:

+
my_clip = VideoFileClip("some_file.mp4")
+my_clip.set_start(t=5) # does nothing, changes are lost
+my_new_clip = my_clip.set_start(t=5) # good !
+
+
+

Also, when you write clip.resize(width=640), it does not immediately apply the effect to all the frames of the clip, but only to the first frame: all the other frames will be resized only when required (that is, when you write the whole clip to a file or when you preview it). Put differently, creating a new clip is neither time- nor memory-hungry; all the computations happen during the final rendering.

+
+

Time representations in MoviePy

+

Many methods that we will see accept times as arguments. For instance, clip.subclip(t_start, t_end) cuts the clip between two times. For these methods, times can be represented either in seconds (t_start=230.54), as a couple (minutes, seconds) (t_start=(3,50.54)), as a triplet (hour, min, sec) (t_start=(0,3,50.54)), or as a string (t_start='00:03:50.54').

+

Most of the time when the times are not provided they are guessed, for instance in clip.subclip(t_start=50) it is implied that t_end corresponds to the end of the clip, in clip.subclip(t_end=20) it is implied that t_start=0. If the time is negative it is considered as the time before the end of the clip: clip.subclip(-20, -10) cuts the clip between 20s before the end and 10s before the end.

+
+
+

Methods to change the clip attributes

+
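While this section is a stub, the pattern is always the same: each set_* method returns a modified copy of the clip (see the previous section). A short sketch:

from moviepy.editor import VideoFileClip

clip = VideoFileClip("myvideo.mp4")
clip10 = clip.set_duration(10)   # a copy lasting 10 seconds
clip_t5 = clip.set_start(5)      # a copy starting at t=5s in a composition
muted = clip.without_audio()     # a copy with no soundtrack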
+
+

clip.fx

+

Suppose that you have some functions implementing effects on clips, i.e. functions which, given a clip and some arguments, return a new clip:

+
effect_1(clip, args1) -> new clip
+effect_2(clip, args2) -> new clip
+effect_3(clip, args3) -> new clip
+
+
+

where args represent arguments and/or keyword arguments. To apply these functions, in that order, to one clip, you would write something like

+
newclip =  effect_3( effect_2( effect_1(clip, args3), args2), args1)
+
+
+

but this is not easy to read. To have a clearer syntax you can use clip.fx:

+
newclip = (clip.fx( effect_1, args1)
+               .fx( effect_2, args2)
+               .fx( effect_3, args3))
+
+
+

Much better ! There are already many effects implemented in the modules moviepy.video.fx and moviepy.audio.fx. The fx methods in these modules are automatically applied to the sound and the mask of the clip if it is relevant, so that you don’t have to worry about modifying these. For practicality, when you use from moviepy.editor import *, these two modules are loaded as vfx and afx, so you may write something like

+
from moviepy.editor import *
+clip = (VideoFileClip("myvideo.avi")
+        .fx( vfx.resize, width=460) # resize (keep aspect ratio)
+        .fx( vfx.speedx, 2) # double the speed
+        .fx( vfx.colorx, 0.5)) # darken the picture
+
+
+

For convenience, when you use moviepy.editor, frequently used methods such as resize can be called in a simpler way: clip.resize(...) instead of clip.fx( vfx.resize, ...)

+
+
+

Methods to create custom effects

+
+

clip.fl

+

You can modify a clip as you want using custom filters with clip.fl_time, clip.fl_image, and more generally with clip.fl.

+

You can change the timeline of the clip with clip.fl_time like this:

+
modifiedClip1 = my_clip.fl_time(lambda t: 3*t)
+modifiedClip2 = my_clip.fl_time(lambda t: 1+sin(t))
+
+
+

Now the clip modifiedClip1 plays the same as my_clip, only three times faster, while modifiedClip2 will play my_clip by oscillating between the times t=0s and t=2s. Note that in the last case you have created a clip of infinite duration (which is not a problem for the moment).

+

You can also modify the display of a clip with clip.fl_image. The following takes a clip and inverts the green and blue channels of the frames:

+
def invert_green_blue(image):
+    return image[:,:,[0,2,1]]
+
+modifiedClip = my_clip.fl_image( invert_green_blue )
+
+
+

Finally, you may want to process the clip by taking into account both the time and the frame picture. This is possible with the method clip.fl(filter). The filter must be a function which takes two arguments and returns a picture. The first argument is a get_frame method (i.e. a function g(t) which given a time returns the clip’s frame at that time), and the second argument is the time.

+
def scroll(get_frame, t):
+    """
+    This function returns a 'region' of the current frame.
+    The position of this region depends on the time.
+    """
+    frame = get_frame(t)
+    frame_region = frame[int(t):int(t)+360,:]
+    return frame_region
+
+modifiedClip = my_clip.fl( scroll )
+
+
+

This will scroll down the clip, with a constant height of 360 pixels.

+

When programming a new effect, prefer using fl_time and fl_image instead of the more general fl whenever possible. The reason is that, when these effects are applied to +ImageClips, MoviePy will recognize that these methods do not need to be applied to each frame, which will +result in faster renderings.

+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/getting_started/efficient_moviepy.html b/v1.0.3/getting_started/efficient_moviepy.html new file mode 100644 index 000000000..019229ec2 --- /dev/null +++ b/v1.0.3/getting_started/efficient_moviepy.html @@ -0,0 +1,347 @@ + + + + + + + + + + How to be efficient with MoviePy — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

How to be efficient with MoviePy

+

This section gathers tips and tricks to help you make the most of what is already known worldwide as the MoviePy experience.

+

The best way to start with MoviePy is to use it with the IPython Notebook: it makes it easier to preview clips (as we will see in this section), has autocompletion, and can display the documentation for the different methods of the library.

+
+

Should I use moviepy.editor ?

+

Most examples in this documentation use the submodule moviepy.editor, but it is not adapted to all needs, so should you use it? Short answer: if you use MoviePy to edit videos by hand, use it; but if you use MoviePy inside a larger library, program, or web server, it is better to avoid it and just load the functions that you need.

+

The module moviepy.editor can be loaded using one of the three following methods:

+
from moviepy.editor import * # imports everything, quick and dirty
+import moviepy.editor as mpy # Clean. Then use mpy.VideoClip, etc.
+from moviepy.editor import VideoFileClip # just import what you need
+
+
+

With any of these lines, the moviepy.editor module will actually do a lot of work behind the curtain: It will fetch all the most common classes, functions and subpackages of MoviePy, initialize a PyGame session (if PyGame is installed) to be able to preview video clips, and implement some shortcuts, like adding the resize transformation to the clips. This way you can use clip.resize(width=240) instead of the longer clip.fx( resize, width=240). In short, moviepy.editor +provides all you need to play around and edit your videos but it will take time to load (circa one second). So if all you need is one or two features inside another library, it is better to import directly what you need, as follows:

+
from moviepy.video.io.VideoFileClip import VideoFileClip
+from moviepy.video.fx.resize import resize
+
+
+
+
+

When to close() a clip

+

When you create some types of clip instances - e.g. VideoFileClip or AudioFileClip - MoviePy creates a subprocess and locks the file. In order to release those resources when you are finished you should call the close() method.

+

This is more important for more complex applications, and is particularly important when running on Windows. While Python’s garbage collector should eventually clean up the resources for you, closing them explicitly makes them available earlier.

+

However, if you close a clip too early, methods on the clip (and any clips derived from it) become unsafe.

+

So, the rules of thumb are:

+
+
    +
  • Call close() on any clip that you construct once you have finished using it, and have also finished using any clip that was derived from it.
  • +
  • Also close any clips you create through AudioFileClip.coreader().
  • +
  • Even if you close a CompositeVideoClip instance, you still need to close the clips it was created from.
  • +
  • Otherwise, if you have a clip that was created by deriving it from another clip (e.g. by calling set_mask()), then generally you shouldn’t close it. Closing the original clip will also close the copy (see the sketch after this list).
  • +
+
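
A minimal sketch of these rules (file names are hypothetical):

+
clip = VideoFileClip("video.mp4")
+small = clip.resize(width=320)  # derived clip: don't close it directly
+small.write_videofile("small.mp4")
+clip.close()  # closing the original also releases the derived copy
+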
+

Clips act as context managers. This means you +can use them with a with statement, and they will automatically be closed at the end of the block, even if there is +an exception.

+
with AudioFileClip("song.wav") as clip:
+    raise NotImplementedError("I will work out how to process this song later")
+# clip.close() is implicitly called, so the lock on song.wav file is immediately released.
+
+
+
+
+

The many ways of previewing a clip

+

When you are editing a video or trying to achieve an effect with MoviePy through a trial and error process, generating the video at each trial can be very long. This section presents a few tricks to go faster.

+
+

clip.save_frame

+

Most of the time, just having one frame of the video can tell you if you are doing the right thing. You can save just one frame of the clip to a file as follows:

+
my_clip.save_frame("frame.jpeg") # saves the first frame
+my_clip.save_frame("frame.png", t=2) # saves the frame a t=2s
+
+
+
+
+

clip.show and clip.preview

+

The methods clip.show and clip.preview enable you to visualize the clip in a PyGame window. They are the fastest way to preview, as the clips are generated and displayed at the same time, and they can be useful to get the coordinates or colors of pixels. These methods require PyGame to be installed and the moviepy.editor module to be used.

+

The method clip.show enables you to preview one frame of a clip without having to write it to a file: the following lines display the frame in a PyGame window:

+
my_clip.show() # shows the first frame of the clip
+my_clip.show(10.5) # shows the frame of the clip at t=10.5s
+my_clip.show(10.5, interactive = True)
+
+
+

The last line (with interactive=True) displays the frame in an interactive way: if you click somewhere in the frame, it will print the position and color of the pixel. Press Escape to exit when you are done.

+

A clip can be previewed as follows

+
my_clip.preview() # preview with default fps=15
+my_clip.preview(fps=25)
+my_clip.preview(fps=15, audio=False) # don't generate/play the audio.
+my_audio_clip.preview(fps=22000)
+
+
+

If you click somewhere in the frames of a video clip being previewed, it will print the position and color of the pixel clicked. Press Escape to abort the previewing.

+

Note that if the clip is complex and your computer is not fast enough, the preview will appear slowed down compared to the real speed of the clip. In this case you can try to lower the frame rate (for instance to 10) or to reduce the size of the clip with clip.resize; it helps.
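
For instance, a sketch combining both tricks:

+
my_clip.resize(width=320).preview(fps=10)  # smaller frames, lower rate: smoother preview
+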

+
+
+

ipython_display

+

Displaying the clips in an IPython Notebook can be very practical, especially if you don’t want to use clip.show() and clip.preview(). Here is what it will look like:

+../_images/demo_preview.jpeg +

With ipython_display you can embed videos, images and sounds, either from a file or directly from a clip:

+
ipython_display(my_video_clip) # embeds a video
+ipython_display(my_imageclip) # embeds an image
+ipython_display(my_audio_clip) # embeds a sound
+
+ipython_display("my_picture.jpeg") # embeds an image
+ipython_display("my_video.mp4") # embeds a video
+ipython_display("my_sound.mp3") # embeds a sound
+
+
+

This will only work if ipython_display is on the last line of the notebook cell. You can also call ipython_display as a clip method:

+
my_video_clip.ipython_display()
+
+
+

If the rendering of your clip requires a frame rate to be provided, you can specify fps=25 in ipython_display.
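
For instance:

+
my_video_clip.ipython_display(fps=25)  # e.g. for a clip that has no fps attribute
+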

+

If you only need to display a snapshot of a video clip at some time t you can write

+
my_video_clip.ipython_display(t=15) # will display a snapshot at t=15s
+
+
+

You can also provide any valid HTML5 option as keyword argument. For instance, if the clip is too big, you will write

+
ipython_display(my_clip, width=400) # HTML5 will resize to 400 pixels
+
+
+

For instance, when you are editing an animated GIF and want to check that it loops well, you can ask the video to start automatically and to loop (i.e. replay indefinitely) :

+
ipython_display(my_clip, autoplay=1, loop=1)
+
+
+

Importantly, ipython_display actually embeds the clips physically in your notebook. The advantage is that you can move the notebook or put it online and the videos will work. The drawback is that the file size of the notebook can become very large. Depending on your browser, re-computing and displaying a video many times can take up space in the cache and in RAM (this will only be a problem for intensive use). Restarting your browser solves the problem.

+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/getting_started/getting_started.html b/v1.0.3/getting_started/getting_started.html new file mode 100644 index 000000000..185f5eba5 --- /dev/null +++ b/v1.0.3/getting_started/getting_started.html @@ -0,0 +1,241 @@ + + + + + + + + + + Getting started with MoviePy — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Getting started with MoviePy

+

These pages explain everything you need to start editing with MoviePy. To go further, have a look at the Gallery and the Example Scripts.

+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/getting_started/quick_presentation.html b/v1.0.3/getting_started/quick_presentation.html new file mode 100644 index 000000000..adfbd704d --- /dev/null +++ b/v1.0.3/getting_started/quick_presentation.html @@ -0,0 +1,303 @@ + + + + + + + + + + Quick presentation — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Quick presentation

+

This section explains when MoviePy can be used and how it works.

+
+

Do I need MoviePy ?

+

Here are a few reasons why you may want to edit videos in Python:

+
    +
  • You have many videos to process or to compose in a complicated way.
  • +
  • You want to automate the creation of videos or GIFs on a web server (Django, Flask, etc.)
  • +
  • You want to automate tedious tasks, like title insertions, tracking objects, cutting scenes, making end credits, subtitles, etc…
  • +
  • You want to code your own video effects to do something no existing video editor can.
  • +
  • You want to create animations from images generated by another Python library (Matplotlib, Mayavi, Gizeh, scikit-image…)
  • +
+

And here are a few uses for which MoviePy is NOT the best solution:

+
    +
  • You only need to do frame-by-frame video analysis (with face detection or other fancy stuff). This could be done with MoviePy in association with other libraries, but really, just use imageio, OpenCV or SimpleCV; these are libraries that specialize in these tasks.
  • +
  • You only want to convert a video file, or turn a series of image files into a movie. In this case it is better to directly call ffmpeg (or avconv or mencoder…); it will be faster and more memory-efficient than going through MoviePy.
  • +
+
+
+

Advantages and limitations

+

MoviePy has been developed with the following goals in mind:

+
    +
  • Simple and intuitive. Basic operations can be done in one line. The code is easy to learn and easy to understand for newcomers.
  • +
  • Flexible. You have total control over the frames of the video and audio, and creating your own effects is easy as Py.
  • +
  • Portable. The code uses very common software (Numpy and FFMPEG) and can run on (almost) any machine with (almost) any version of Python.
  • +
+

For the limitations: MoviePy cannot (yet) stream videos (read from a webcam, or render a video live on a distant machine), and is not really designed for video processing involving many successive frames of a movie (like video stabilization; you’ll need other software for that). You can also have memory problems if you use many video, audio, and image sources at the same time (>100), but this will be fixed in future versions.

+
+
+

Example code

+

In a typical MoviePy script, you load video or audio files, modify them, put them together, and write the final result to a new video file. As an example, let us load a video of my last holidays, lower the volume, add a title in the center of the video for the first ten seconds, and write the result in a file:

+
# Import everything needed to edit video clips
+from moviepy.editor import *
+
+# Load myHolidays.mp4 and select the subclip 00:00:50 - 00:00:60
+clip = VideoFileClip("myHolidays.mp4").subclip(50,60)
+
+# Reduce the audio volume (volume x 0.8)
+clip = clip.volumex(0.8)
+
+# Generate a text clip. You can customize the font, color, etc.
+txt_clip = TextClip("My Holidays 2013",fontsize=70,color='white')
+
+# Say that you want it to appear 10s at the center of the screen
+txt_clip = txt_clip.set_pos('center').set_duration(10)
+
+# Overlay the text clip on the first video clip
+video = CompositeVideoClip([clip, txt_clip])
+
+# Write the result to a file (many options available !)
+video.write_videofile("myHolidays_edited.webm")
+
+
+
+
+

How MoviePy works

+

MoviePy uses the software ffmpeg to read and to export video and audio files. It also (optionally) uses ImageMagick to generate texts and write GIF files. The processing of the different media is ensured by Python’s fast numerical library Numpy. Advanced effects and enhancements use some of Python’s numerous image processing libraries (PIL, Scikit-image, scipy, etc.).

+../_images/explanations.jpeg +
+
+

Basic concepts

+

The central objects of MoviePy are clips, which can be AudioClips or VideoClips. They can be modified (cut, slowed down, darkened…) or mixed with other clips to form new clips, they can be previewed (using either PyGame or the IPython Notebook) and rendered to a file (as an MP4, a GIF, an MP3, etc.). VideoClips for instance can be created from a video file, an image, a text, or a custom animation. They can have an audio track (which is an AudioClip) and a mask (a special VideoClip indicating which parts of the clip to hide when the clip is mixed with other clips). See Creating and exporting video clips and Mixing clips for more details.

+

A clip can be modified using one of moviepy’s numerous effects (like in clip.resize(width=360), clip.subclip(t1,t2), or clip.fx(vfx.black_white)) or using a user-implemented effect. MoviePy implements many functions (like clip.fl, clip.fx, etc.) which make it very easy to code your own effects in a few lines. See Clips transformations and effects for more.

+

You will also find a few advanced goodies in moviepy.video.tools to track objects in a video, draw simple shapes and color gradients (very useful for masks), generate subtitles and end credits, etc. See Advanced tools for a description of these.

+

Finally, although MoviePy has no graphical user interface, there are many ways to preview a clip which allow you to fine-tune your scripts and be sure that everything is perfect when you render your video in high quality. See How to be efficient with MoviePy.

+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/getting_started/videoclips.html b/v1.0.3/getting_started/videoclips.html new file mode 100644 index 000000000..6d59c8e5a --- /dev/null +++ b/v1.0.3/getting_started/videoclips.html @@ -0,0 +1,405 @@ + + + + + + + + + + Creating and exporting video clips — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Creating and exporting video clips

+

Video and audio clips are the central objects of MoviePy. In this section we present the different sorts of clips, how to create them, and how to write them to a file. For information on modifying a clip (cuts, effects, etc.), see Clips transformations and effects. For how to put clips together, see Mixing clips, and to see how to preview clips before writing a file, refer to How to be efficient with MoviePy.

+

The following code summarizes the base clips that you can create with moviepy:

+
# VIDEO CLIPS
+clip = VideoClip(make_frame, duration=4) # for custom animations (see below)
+clip = VideoFileClip("my_video_file.mp4") # or .avi, .webm, .gif ...
+clip = ImageSequenceClip(['image_file1.jpeg', ...], fps=24)
+clip = ImageClip("my_picture.png") # or .jpeg, .tiff, ...
+clip = TextClip("Hello !", font="Amiri-Bold", fontsize=70, color="black")
+clip = ColorClip(size=(460,380), color=[R,G,B])
+
+# AUDIO CLIPS
+clip = AudioFileClip("my_audiofile.mp3") # or .ogg, .wav... or a video !
+clip = AudioArrayClip(numpy_array, fps=44100) # from a numerical array
+clip = AudioClip(make_frame, duration=3) # uses a function make_frame(t)
+
+
+

The best way to understand these clips is to read the full documentation for each in the Reference Manual. In the next sections we see how to create clips (for instance from video or audio files), how to mix them together, and how to write them to a file.

+
+

Categories of video clips

+

Video clips are the building blocks of longer videos. Technically, they are clips with a clip.get_frame(t) method which outputs a HxWx3 numpy array representing the frame of the clip at time t. There are two main categories: animated clips (made with VideoFileClip and VideoClip) and unanimated clips which show the same picture for an a-priori infinite duration (ImageClip, TextClip, ColorClip). There are also special video clips called masks, which belong to the categories above but output greyscale frames indicating which parts of another clip are visible or not. A video clip can carry around an audio clip (clip.audio), which is its soundtrack, and a mask clip.
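
For instance, a quick way to inspect a frame (the clip name is hypothetical):

+
frame = myclip.get_frame(1.5)  # numpy array of shape (H, W, 3)
+print(frame.shape, frame.dtype)  # e.g. (720, 1280, 3) uint8
+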

+
+

VideoClip

+

VideoClip is the base class for all the other video clips in MoviePy. If all you want is to edit video files, you will never need it. This class is practical when you want to make animations from frames that are generated by another library. All you need is to define a function make_frame(t) which returns a HxWx3 numpy array (of 8-bit integers) representing the frame at time t. Here is an example with the graphics library Gizeh:

+
import gizeh
+import moviepy.editor as mpy
+
+def make_frame(t):
+    W, H = 128, 128
+    surface = gizeh.Surface(W, H) # width, height
+    radius = W*(1 + (t*(2-t))**2)/6 # the radius varies over time
+    circle = gizeh.circle(radius, xy = (64,64), fill=(1,0,0))
+    circle.draw(surface)
+    return surface.get_npimage() # returns a 8-bit RGB array
+
+clip = mpy.VideoClip(make_frame, duration=2) # 2 seconds
+clip.write_gif("circle.gif",fps=15)
+
+
+../_images/circle.gif +

Note that clips made with a make_frame do not have an explicit frame rate, so you must provide a frame rate (fps, frames per second) for write_gif and write_videofile, and more generally for any method that requires iterating through the frames.

+
+
+

VideoFileClip

+

A VideoFileClip is a clip read from a video file (most formats are supported) or a GIF file. You load the video as follows:

+
myclip = VideoFileClip("some_video.avi")
+myclip = VideoFileClip("some_animation.gif")
+
+
+

Note that these clips will have an fps (frame per second) attribute, which will be transmitted if you do small modifications of the clip, and will be used by default in write_videofile, write_gif, etc. For instance:

+
myclip = VideoFileClip("some_video.avi")
+print (myclip.fps) # prints for instance '30'
+# Now cut the clip between t=10 and 25 secs. This conserves the fps.
+myclip2 = myclip.subclip(10, 25)
+myclip2.write_gif("test.gif") # the gif will have 30 fps
+
+
+

For more, see VideoFileClip.

+
+
+

ImageSequenceClip

+

This is a clip made from a series of images, you call it with

+
clip = ImageSequenceClip(images_list, fps=25)
+
+
+

where images_list can be either a list of image names (that will be played in that order), a folder name (in which case all the image files in the folder will be played in alphanumerical order), or a list of frames (Numpy arrays), obtained for instance from other clips.

+

When you provide a folder name or a list of file names, you can pass load_images=True to specify that all images should be loaded into RAM. This is only useful if you have a small number of images that will each be used more than once (e.g. if the images form a looping animation).
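
For instance (folder names are hypothetical):

+
clip = ImageSequenceClip("frames_folder", fps=25)  # plays the files in alphanumerical order
+loop = ImageSequenceClip("loop_frames", fps=25, load_images=True)  # small looping set kept in RAM
+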

+
+
+

ImageClip

+

An ImageClip is a video clip that always displays the same image. You can create one as follows:

+
myclip = ImageClip("some_picture.jpeg")
+myclip = ImageClip(some_array) # a (height x width x 3) RGB numpy array
+myclip = some_video_clip.to_ImageClip(t='01:00:00') # frame at t=1 hour.
+
+
+

For more, see ImageClip.

+

Two examples of ImageClip shown below are the TextClip and ColorClip

+
+
+

TextClip

+

Generating a TextClip requires ImageMagick to be installed and (for Windows users) linked to MoviePy; see the installation instructions.

+

Here is how you make a textclip (you won’t need all these options all the time):

+
myclip = TextClip("Hello", font='Amiri-Bold')
+
+
+

The font can be any font installed on your computer, but ImageMagick will have specific names for it. For instance the normal Amiri font will be called Amiri-Regular while the Impact font will be called Impact-Normal. To get a list of the possible fonts, type TextClip.list('font'). To find all the font names related to a given font, use for instance

+
TextClip.search('Amiri', 'font') # Returns all font names containing Amiri
+
+
+

Note also that the use of a stroke (or contour) will not work well on small letters, so if you need a small text with a contour, it is better to generate a big text, then downsize it:

+
myclip = TextClip("Hello", fontsize=70, stroke_width=5).resize(height=15)
+
+
+

TextClips have many, many options: alignment, kerning (distance between the letters), stroke size, background, word wrapping, etc. see TextClip for more.

+
+
+
+

Mask clips

+

A mask is a special video clip which indicates which pixels will be visible when a video clip carrying this mask will be composed with other video clips (see Mixing clips). Masks are also used to define transparency when you export the clip as GIF file or as a PNG.

+

The fundamental difference between masks and standard clips is that standard clips output frames with 3 components (R-G-B) per pixel, each between 0 and 255, while a mask has just one component per pixel, between 0 and 1 (1 indicating a fully visible pixel and 0 a transparent pixel). In other words, a mask is always in greyscale.

+

When you create or load a clip that you will use as a mask you need to declare it:

+
maskclip = VideoClip(makeframe, duration=4, ismask=True)
+maskclip = ImageClip("my_mask.jpeg", ismask=True)
+maskclip = VideoFileClip("myvideo.mp4", ismask=True)
+
+
+

In the case of video and image files, if these are not already black and white they will be converted automatically.

+

Then you attach this mask to a clip (which must have the same dimensions) with myclip.set_mask(maskclip).
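
A minimal sketch (file names are hypothetical, and both clips must have the same dimensions):

+
clip = VideoFileClip("myvideo.mp4")
+maskclip = ImageClip("my_mask.jpeg", ismask=True)
+clip_with_mask = clip.set_mask(maskclip)  # pixels where the mask is 0 become invisible
+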

+

Some image formats like PNG support transparency with an alpha layer, which MoviePy will use as a mask:

+
myclip = ImageClip("image.png", transparent=True) # True is the default
+myclip.mask # <- the alpha layer of the picture.
+
+
+

Any video clip can be turned into a mask with clip.to_mask(), and a mask can be turned to a standard RGB video clip with my_mask_clip.to_RGB().
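
For instance:

+
my_mask = some_video_clip.to_mask()  # greyscale frames with values in [0, 1]
+rgb_clip = my_mask_clip.to_RGB()  # back to a standard 3-component clip
+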

+

Masks are treated differently by many methods (because their frames are different) but you can do with a mask pretty much everything you can do with a standard clip: you can cut it, edit it, preview it, write it to a video file, make snapshots, etc.

+
+
+

Exporting video clips

+
+

Video files (.mp4, .webm, .ogv…)

+

To write a clip as a video file, use

+
my_clip.write_videofile("movie.mp4") # default codec: 'libx264', 24 fps
+my_clip.write_videofile("movie.mp4",fps=15)
+my_clip.write_videofile("movie.webm") # webm format
+my_clip.write_videofile("movie.webm",audio=False) # don't render audio.
+
+
+

MoviePy has default codec names for the most common file extensions. If you want to use exotic formats or if you are not happy with the defaults you can provide the codec with codec='mpeg4' for instance. There are many many options when you are writing a video (bitrate, parameters of the audio writing, file size optimization, number of processors to use, etc.). Please refer to write_videofile() for more.
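
For instance, a sketch (the bitrate value is only illustrative):

+
my_clip.write_videofile("movie.avi", codec='mpeg4', bitrate='8000k')
+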

+

Sometimes it is impossible for MoviePy to guess the duration attribute of the clip (keep in mind that some clips, like ImageClips displaying a picture, have a priori an infinite duration). Then, the duration must be set manually with clip.set_duration:

+
# Make a video showing a flower for 5 seconds
+my_clip = Image("flower.jpeg") # has infinite duration
+my_clip.write_videofile("flower.mp4") # Will fail ! NO DURATION !
+my_clip.set_duration(5).write_videofile("flower.mp4") # works !
+
+
+
+
+

Animated GIFs

+

To write your video as an animated GIF, use

+
my_clip.write_gif('test.gif', fps=12)
+
+
+

Note that this requires ImageMagick to be installed. Otherwise you can also create the GIF with ffmpeg by adding the option program='ffmpeg'; it will be much faster but won’t look as nice and won’t be optimized.
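
For instance:

+
my_clip.write_gif('test.gif', fps=12, program='ffmpeg')  # faster, but less optimized
+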

+

There are many options to optimize the quality and size of a gif. Please refer to write_gif().

+

Note that for editing gifs the best way is to preview them in the notebook as explained here: ipython_display

+

For examples of use, see this blog post for information on making GIFs from video files, and this other post for GIF animations with vector graphics.

+
+
+

Export images

+

You can write a frame to an image file with

+
myclip.save_frame("frame.png") # by default the first frame is extracted
+myclip.save_frame("frame.jpeg", t='01:00:00') # frame at time t=1h
+
+
+

If the clip has a mask it will be exported as the alpha layer of the image unless you specify withmask=False.
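
For instance:

+
myclip.save_frame("frame.png", t=1, withmask=False)  # ignore the mask, save plain RGB
+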

+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/getting_started/working_with_matplotlib.html b/v1.0.3/getting_started/working_with_matplotlib.html new file mode 100644 index 000000000..c432575ba --- /dev/null +++ b/v1.0.3/getting_started/working_with_matplotlib.html @@ -0,0 +1,306 @@ + + + + + + + + + + Working with matplotlib — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Working with matplotlib

+
+

Defining custom animations

+

MoviePy allows you to produce custom animations by defining a function that returns a frame at a given time of the animation in the form of a numpy array.

+

An example of this workflow is below:

+
from moviepy.editor import VideoClip
+
+def make_frame(t):
+    """Returns an image of the frame for time t."""
+    # ... create the frame with any library here ...
+    return frame_for_time_t # (Height x Width x 3) Numpy array
+
+animation = VideoClip(make_frame, duration=3) # 3-second clip
+
+
+

This animation can then be exported by the usual MoviePy means:

+
# export as a video file
+animation.write_videofile("my_animation.mp4", fps=24)
+# export as a GIF
+animation.write_gif("my_animation.gif", fps=24) # usually slower
+
+
+
+
+

Simple matplotlib example

+

An example of an animation using matplotlib can then be as follows:

+
import matplotlib.pyplot as plt
+import numpy as np
+from moviepy.editor import VideoClip
+from moviepy.video.io.bindings import mplfig_to_npimage
+
+x = np.linspace(-2, 2, 200)
+
+duration = 2
+
+fig, ax = plt.subplots()
+def make_frame(t):
+    ax.clear()
+    ax.plot(x, np.sinc(x**2) + np.sin(x + 2*np.pi/duration * t), lw=3)
+    ax.set_ylim(-1.5, 2.5)
+    return mplfig_to_npimage(fig)
+
+animation = VideoClip(make_frame, duration=duration)
+animation.write_gif('matplotlib.gif', fps=20)
+
+
+
+
+

Working in the Jupyter Notebook

+

If you are working inside a Jupyter Notebook, you can take advantage of the fact that VideoClips can be embedded in the output cells of the notebook with the ipython_display method. The above example then becomes:

+
import matplotlib.pyplot as plt
+import numpy as np
+from moviepy.editor import VideoClip
+from moviepy.video.io.bindings import mplfig_to_npimage
+
+x = np.linspace(-2, 2, 200)
+
+duration = 2
+
+fig, ax = plt.subplots()
+def make_frame(t):
+    ax.clear()
+    ax.plot(x, np.sinc(x**2) + np.sin(x + 2*np.pi/duration * t), lw=3)
+    ax.set_ylim(-1.5, 2.5)
+    return mplfig_to_npimage(fig)
+
+animation = VideoClip(make_frame, duration=duration)
+animation.ipython_display(fps=20, loop=True, autoplay=True)
+
+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/index.html b/v1.0.3/index.html new file mode 100644 index 000000000..4af9ad03c --- /dev/null +++ b/v1.0.3/index.html @@ -0,0 +1,248 @@ + + + + + + + + + + User Guide — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ + _images/logo.png +

MoviePy is a Python module for video editing, which can be used for basic operations (like cuts, concatenations, title insertions), video compositing (a.k.a. non-linear editing), video processing, or to create advanced effects. It can read and write the most common video formats, including GIF.

+

Here it is in action (run in an IPython Notebook):

+_images/demo_preview1.jpeg + +
+

Contribute !

+

MoviePy is an open source software originally written by Zulko and released under the MIT licence. It works on Windows, Mac, and Linux, with Python 2 or Python 3. The code is hosted on Github, where you can push improvements, report bugs and ask for help. There is also a MoviePy forum on Reddit and a mailing list on librelist .

+ + + +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/install.html b/v1.0.3/install.html new file mode 100644 index 000000000..137270f4f --- /dev/null +++ b/v1.0.3/install.html @@ -0,0 +1,262 @@ + + + + + + + + + + Download and Installation — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Download and Installation

+
+

Installation

+

Method with pip: if you have pip installed, just type this in a terminal (it will install ez_setup if you don’t already have it)

+
(sudo) pip install moviepy
+
+
+

If you have neither setuptools nor ez_setup installed, the command above will fail; in this case type this before installing:

+
(sudo) pip install ez_setup
+
+
+

Method by hand: download the sources, either on PyPI or (if you want the development version) on Github, unzip everything in one folder, open a terminal and type

+
(sudo) python setup.py install
+
+
+

MoviePy depends on the Python modules Numpy, imageio, Decorator, and tqdm, which will be automatically installed during MoviePy’s installation. It should work on Windows/Mac/Linux, with Python 2.7+ and 3 ; if you have trouble installing MoviePy or one of its dependencies, please provide feedback !

+

MoviePy depends on the software FFMPEG for video reading and writing. You don’t need to worry about that, as FFMPEG should be automatically downloaded/installed by ImageIO during your first use of MoviePy (it takes a few seconds). If you want to use a specific version of FFMPEG, you can set the FFMPEG_BINARY environment variable. See moviepy/config_defaults.py for details.
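
For instance, a minimal sketch (the path is hypothetical); the variable must be set before moviepy is imported:

+
import os
+os.environ["FFMPEG_BINARY"] = "/usr/local/bin/ffmpeg"
+import moviepy.editor
+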

+
+

Other optional but useful dependencies

+

ImageMagick is not strictly required; it is only needed if you want to write texts. It can also be used as a backend for GIFs, but you can make GIFs with MoviePy without ImageMagick.

+

Once you have installed it, ImageMagick will be automatically detected by MoviePy, except on Windows! Windows users, before installing MoviePy by hand, go into the moviepy/config_defaults.py file and provide the path to the ImageMagick binary called magick. It should look like this:

+
IMAGEMAGICK_BINARY = "C:\\Program Files\\ImageMagick_VERSION\\magick.exe"
+
+
+

You can also set the IMAGEMAGICK_BINARY environment variable. See moviepy/config_defaults.py for details.

+

If you are using an older version of ImageMagick, keep in mind the name of the executable is not magick.exe but convert.exe. In that case, the IMAGEMAGICK_BINARY property should be C:\\Program Files\\ImageMagick_VERSION\\convert.exe

+

PyGame is needed for video and sound previews (useless if you intend to work with MoviePy on a server but really essential for advanced video editing by hand).

+

For advanced image processing you will need one or several of these packages. For instance using the method clip.resize requires that at least one of Scipy, PIL, Pillow or OpenCV are installed.

+
    +
  • The Python Imaging Library (PIL) or, better, its branch Pillow .
  • +
  • Scipy (for tracking, segmenting, etc.), which can also be used for resizing video clips if PIL and OpenCV aren’t installed on your computer.
  • +
  • Scikit Image may be needed for some advanced image manipulation.
  • +
  • OpenCV 2.4.6 or more recent (provides the package cv2), which may be needed for some advanced image manipulation.
  • +
+

If you are on Linux, these packages will likely be in your repos.

+
+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/objects.inv b/v1.0.3/objects.inv new file mode 100644 index 000000000..9e56e9644 Binary files /dev/null and b/v1.0.3/objects.inv differ diff --git a/v1.0.3/opencv_instructions.html b/v1.0.3/opencv_instructions.html new file mode 100644 index 000000000..115e9c595 --- /dev/null +++ b/v1.0.3/opencv_instructions.html @@ -0,0 +1,256 @@ + + + + + + + + + + So you want to install OpenCV 2.4.6 ? — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
    + +
+ + +
+
+
+
+ +
+

So you want to install OpenCV 2.4.6 ?

+

OpenCV is very optional, its installation is not always simple, and I found it to be unstable; be warned! The installation seems easy on Windows. On Linux, here is what I found on the Internet:

+
    +
  • Remove any other version of OpenCV if you installed it through a package manager.

    +
  • +
  • Unzip the source code of OpenCV 2.4.6 in some folder. Open a terminal in this folder.

    +
  • +
  • Make a new directory and go into this directory:

    +
    mkdir release
    +cd release
    +
    +
    +
  • +
  • Run cmake. Here is the line I used:

    +
    cmake -D WITH_TBB=ON -D BUILD_NEW_PYTHON_SUPPORT=ON -D WITH_V4L=OFF -D INSTALL_C_EXAMPLES=ON -D INSTALL_PYTHON_EXAMPLES=ON -D BUILD_EXAMPLES=ON ..
    +
    +
    +
  • +
  • Run make. This may take a few minutes (15 minutes on my computer).

    +
    make
    +
    +
    +
  • +
  • Finally, install.

    +
    sudo make install
    +
    +
    +
  • +
+

And voilà !

+

You can check if it worked by opening a Python console and typing:

+
import cv2
+print(cv2.__version__)
+
+
+

Advice: do not throw your release folder away. If later you have strange bugs with OpenCV involving .so files, just redo the sudo make install step.

+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/py-modindex.html b/v1.0.3/py-modindex.html new file mode 100644 index 000000000..64b75217a --- /dev/null +++ b/v1.0.3/py-modindex.html @@ -0,0 +1,253 @@ + + + + + + + + + + Python Module Index — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
    + +
+ + +
+
+
+
+ + +

Python Module Index

+ +
+
m
+  moviepy
+    moviepy.decorators
+    moviepy.video.io.ffmpeg_tools
+    moviepy.video.tools.credits
+    moviepy.video.tools.drawing
+    moviepy.video.tools.subtitles
+ + +
+ +
+
+ +
+ +
+

+
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/AudioClip.html b/v1.0.3/ref/AudioClip.html new file mode 100644 index 000000000..75a3ba743 --- /dev/null +++ b/v1.0.3/ref/AudioClip.html @@ -0,0 +1,1432 @@ + + + + + + + + + + AudioClip — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

AudioClip

+
+

AudioClip

+
+
+class moviepy.audio.AudioClip.AudioClip(make_frame=None, duration=None, fps=None)[source]
+

Bases: moviepy.Clip.Clip

+

Base class for audio clips.

+

See AudioFileClip and CompositeSoundClip for usable classes.

+

An AudioClip is a Clip with a make_frame attribute of the form t -> [f_t] for mono sound and t -> [f1_t, f2_t] for stereo sound (the arrays are Numpy arrays). The f_t are floats between -1 and 1. These bounds can be exceeded without problems (the program will put the sound back into the bounds at conversion time, without much impact).

+ +++ + + + +
Parameters:
+
make_frame
+

A function t -> frame at time t. The frame does not mean much for a sound, it is just a float. What ‘makes’ the sound are the variations of that float over time.

+
+
nchannels
+

Number of channels (one or two for mono or stereo).

+
+
+
+

Examples

+
>>> # Plays the note A (a sine wave of frequency 440HZ)
+>>> import numpy as np
+>>> make_frame = lambda t: 2*[ np.sin(440 * 2 * np.pi * t) ]
+>>> clip = AudioClip(make_frame, duration=5)
+>>> clip.preview()
+
+
+
+
+close(self)
+

Release any resources that are in use.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but skips the extract between ta and tb, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If the original clip has a duration attribute set, the duration of the returned clip is automatically computed as duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
+
+fl(self, fun, apply_to=None, keep_duration=True)
+

General processing of a clip.

+

Returns a new Clip whose frames are a transformation +(through function fun) of the frames of the current clip.

+ +++ + + + +
Parameters:
+
fun
+

A function with signature (gf,t -> frame) where gf will +represent the current clip’s get_frame method, +i.e. gf is a function (t->image). Parameter t is a time +in seconds, frame is a picture (=Numpy array) which will be +returned by the transformed clip (see examples below).

+
+
apply_to
+

Can be either 'mask', or 'audio', or +['mask','audio']. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration
+

Set to True if the transformation does not change the +duration of the clip.

+
+
+
+

Examples

+

In the following, newclip is a 100 pixels-high clip whose video content scrolls from the top to the bottom of the frames of clip.

+
>>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :]
+>>> newclip = clip.fl(fl, apply_to='mask')
+
+
+
+ +
+
+fl_time(self, t_func, apply_to=None, keep_duration=False)
+

Returns a Clip instance playing the content of the current clip +but with a modified timeline, time t being replaced by another +time t_func(t).

+ +++ + + + +
Parameters:
+
t_func:
+

A function t-> new_t

+
+
apply_to:
+

Can be either ‘mask’, or ‘audio’, or [‘mask’,’audio’]. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration:
+

False (default) if the transformation modifies the +duration of the clip.

+
+
+
+

Examples

+
>>> # plays the clip (and its mask and sound) twice faster
+>>> newclip = clip.fl_time(lambda t: 2*t, apply_to=['mask', 'audio'])
+>>>
+>>> # plays the clip starting at t=3, and backwards:
+>>> newclip = clip.fl_time(lambda t: 3-t)
+
+
+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs), for instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> resize( volumex( mirrorx( clip ), 0.5), 0.3)
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t +or (mono or stereo) value for a sound clip

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns true if t is between the start and the end of the clip. t can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t is a numpy array, returns False if none of the t is in the clip, else returns a vector [b_1, b_2, b_3…] where b_i is true iff t_i is in the clip.

+
+ +
+
+iter_chunks(self, chunksize=None, chunk_duration=None, fps=None, quantize=False, nbytes=2, logger=None)[source]
+

Iterator that returns the whole sound array of the clip by chunks
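
For instance, a sketch processing the sound one second at a time:

+
>>> for chunk in clip.iter_chunks(chunk_duration=1.0, fps=44100):
+...     print(chunk.shape)  # roughly (44100, nchannels) per chunk
+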

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as a HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute set to t, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. Also sets the duration of the mask and audio, if any, of the returned clip. If change_end is False, the start attribute of the clip will be modified as a function of the duration and the preset end of the clip.

+
+ +
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like +write_videofile, iterframe, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Says whether the clip is a mask or not (ismask is a boolean)

+
+ +
+
+set_make_frame(self, make_frame)
+

Sets a make_frame attribute for the clip. Useful for setting +arbitrary/complicated videoclips.

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory

+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, the end attribute of the clip will be updated to start+duration.

+

If change_end=False and the clip has an end attribute, the duration attribute of the clip will be updated to end-start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip between times t_start and t_end, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t_end is not provided, it is assumed to be the duration of the clip (potentially infinite). If t_end is a negative value, it is reset to clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be subclips of the mask and audio of the original clip, if they exist.

+
+ +
+
+to_audiofile(*a, **kw)
+

The function to_audiofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_audiofile, instead.

+
+ +
+
+to_soundarray(self, tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000)[source]
+

Transforms the sound into an array that can be played by pygame +or written in a wav file. See AudioClip.preview.

+ +++ + + + +
Parameters:
+
fps
+

Frame rate of the sound for the conversion. +44100 for top quality.

+
+
nbytes
+

Number of bytes to encode the sound: 1 for 8bit sound, +2 for 16bit, 4 for 32bit sound.

+
+
+
+
+ +
+
+write_audiofile(self, filename, fps=None, nbytes=2, buffersize=2000, codec=None, bitrate=None, ffmpeg_params=None, write_logfile=False, verbose=True, logger='bar')[source]
+

Writes an audio file from the AudioClip.

+ +++ + + + +
Parameters:
+
filename
+

Name of the output file

+
+
fps
+

Frames per second. If not set, it will default to self.fps if already set, otherwise it will default to 44100.

+
+
nbytes
+

Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound)

+
+
codec
+

Which audio codec should be used. If None provided, the codec is +determined based on the extension of the filename. Choose +‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav.

+
+
bitrate
+

Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. Will determine the size and quality of the output file. Note that it is mainly an indicative goal; the bitrate won’t necessarily be this value in the output file.

+
+
ffmpeg_params
+

Any additional parameters you would like to pass, as a list +of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’]

+
+
write_logfile
+

If true, produces a detailed logfile named filename + ‘.log’ +when writing the file

+
+
verbose
+

Boolean indicating whether to print information

+
+
logger
+

Either ‘bar’ or None or any Proglog logger

+
+
+
+
+ +
+ +
+
+

AudioFileClip

+
+
+class moviepy.audio.io.AudioFileClip.AudioFileClip(filename, buffersize=200000, nbytes=2, fps=44100)[source]
+

Bases: moviepy.audio.AudioClip.AudioClip

+

An audio clip read from a sound file, or an array. The whole file is not loaded in memory. Instead, only a portion is read and stored in memory. This portion includes frames before and after the last frames read, so that it is fast to read the sound backward and forward.

+ +++ + + + +
Parameters:
+
filename
+

Either a soundfile name (of any extension supported by ffmpeg) +or an array representing a sound. If the soundfile is not a .wav, +it will be converted to .wav first, using the fps and +bitrate arguments.

+
+
buffersize:
+

Size to load in memory (in number of frames)

+
+
+
+

Examples

+
>>> snd = AudioFileClip("song.wav")
+>>> snd.close()
+>>> snd = AudioFileClip("song.mp3", fps = 44100)
+>>> second_reader = snd.coreader()
+>>> second_reader.close()
+>>> snd.close()
+>>> with AudioFileClip(mySoundArray, fps=44100) as snd:  # from a numeric array
+>>>     pass  # Close is implicitly performed by context manager.
+
+
+ +++ + + + +
Attributes:
+
nbytes
+

Number of bits per frame of the original audio file.

+
+
fps
+

Number of frames per second in the audio file

+
+
buffersize
+

See Parameters.

+
+
+
+
+
+close(self)[source]
+

Close the internal reader.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+coreader(self)[source]
+

Returns a copy of the AudioFileClip, i.e. a new entrance point +to the audio file. Use copy when you have different clips +watching the audio file at different times.

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but skips the extract between ta and tb, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If the original clip has a duration attribute set, the duration of the returned clip is automatically computed as duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
+
+fl(self, fun, apply_to=None, keep_duration=True)
+

General processing of a clip.

+

Returns a new Clip whose frames are a transformation +(through function fun) of the frames of the current clip.

+ +++ + + + +
Parameters:
+
fun
+

A function with signature (gf,t -> frame) where gf will +represent the current clip’s get_frame method, +i.e. gf is a function (t->image). Parameter t is a time +in seconds, frame is a picture (=Numpy array) which will be +returned by the transformed clip (see examples below).

+
+
apply_to
+

Can be either 'mask', or 'audio', or +['mask','audio']. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration
+

Set to True if the transformation does not change the +duration of the clip.

+
+
+
+

Examples

+

In the following, newclip is a 100 pixels-high clip whose video content scrolls from the top to the bottom of the frames of clip.

+
>>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :]
+>>> newclip = clip.fl(fl, apply_to='mask')
+
+
+
+ +
+
+fl_time(self, t_func, apply_to=None, keep_duration=False)
+

Returns a Clip instance playing the content of the current clip +but with a modified timeline, time t being replaced by another +time t_func(t).

+ +++ + + + +
Parameters:
+
t_func:
+

A function t-> new_t

+
+
apply_to:
+

Can be either ‘mask’, or ‘audio’, or [‘mask’,’audio’]. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration:
+

False (default) if the transformation modifies the +duration of the clip.

+
+
+
+

Examples

+
>>> # plays the clip (and its mask and sound) twice faster
+>>> newclip = clip.fl_time(lambda t: 2*t, apply_to=['mask', 'audio'])
+>>>
+>>> # plays the clip starting at t=3, and backwards:
+>>> newclip = clip.fl_time(lambda t: 3-t)
+
+
+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs), for instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> resize( volumex( mirrorx( clip ), 0.5), 0.3)
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t +or (mono or stereo) value for a sound clip

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns true if t is between the start and the end of the clip. t can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t is a numpy array, returns False if none of the t is in the clip, else returns a vector [b_1, b_2, b_3…] where b_i is true iff t_i is in the clip.

+
+ +
+
+iter_chunks(self, chunksize=None, chunk_duration=None, fps=None, quantize=False, nbytes=2, logger=None)
+

Iterator that returns the whole sound array of the clip by chunks

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as a HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute set to t, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. Also sets the duration of the mask and audio, if any, of the returned clip. If change_end is False, the start attribute of the clip will be modified as a function of the duration and the preset end of the clip.

+
+ +
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like +write_videofile, iterframe, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Says whether the clip is a mask or not (ismask is a boolean)

+
+ +
+
+set_make_frame(self, make_frame)
+

Sets a make_frame attribute for the clip. Useful for setting +arbitrary/complicated videoclips.

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory

+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, the end attribute of the clip will be updated to start+duration.

+

If change_end=False and the clip has an end attribute, the duration attribute of the clip will be updated to end-start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip between times t_start and t_end, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t_end is not provided, it is assumed to be the duration of the clip (potentially infinite). If t_end is a negative value, it is reset to clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be subclips of the mask and audio of the original clip, if they exist.

+
+ +
+
+to_audiofile(*a, **kw)
+

The function to_audiofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_audiofile, instead.

+
+ +
+
+to_soundarray(self, tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000)
+

Transforms the sound into an array that can be played by pygame +or written in a wav file. See AudioClip.preview.

+ +++ + + + +
Parameters:
+
fps
+

Frame rate of the sound for the conversion. +44100 for top quality.

+
+
nbytes
+

Number of bytes to encode the sound: 1 for 8bit sound, +2 for 16bit, 4 for 32bit sound.

+
+
+
+
+ +
+
+write_audiofile(self, filename, fps=None, nbytes=2, buffersize=2000, codec=None, bitrate=None, ffmpeg_params=None, write_logfile=False, verbose=True, logger='bar')
+

Writes an audio file from the AudioClip.

+ +++ + + + +
Parameters:
+
filename
+

Name of the output file

+
+
fps
+

Frames per second. If not set, it will default to self.fps if already set, otherwise it will default to 44100.

+
+
nbytes
+

Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound)

+
+
codec
+

Which audio codec should be used. If None provided, the codec is +determined based on the extension of the filename. Choose +‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav.

+
+
bitrate
+

Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. Will determine the size and quality of the output file. Note that it is mainly an indicative goal; the bitrate won’t necessarily be this value in the output file.

+
+
ffmpeg_params
+

Any additional parameters you would like to pass, as a list +of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’]

+
+
write_logfile
+

If true, produces a detailed logfile named filename + ‘.log’ +when writing the file

+
+
verbose
+

Boolean indicating whether to print information

+
+
logger
+

Either ‘bar’ or None or any Proglog logger

+
+
+
+
+ +
+ +
+
+

CompositeAudioClip

+
+
+class moviepy.audio.AudioClip.CompositeAudioClip(clips)[source]
+

Bases: moviepy.audio.AudioClip.AudioClip

+

Clip made by composing several AudioClips.

+

An audio clip made by putting together several audio clips.

+ +++ + + + +
Parameters:
+
clips
+

List of audio clips, which may start playing at different times or +together. If all have their duration attribute set, the +duration of the composite clip is computed automatically.

+
+
+
+
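For instance, a minimal sketch of composing two audio clips (file names are assumptions):
>>> from moviepy.editor import AudioFileClip, CompositeAudioClip
+>>> music = AudioFileClip("music.mp3")
+>>> voice = AudioFileClip("voice.mp3").set_start(3)  # voice enters at t=3s
+>>> mix = CompositeAudioClip([music, voice])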
+
+close(self)
+

Release any resources that are in use.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but +skips the extract between ta and tb, which can be +expressed in seconds (15.35), in (min, sec), in (hour, min, sec), +or as a string: ‘01:03:05.35’. +If the original clip has a duration attribute set, +the duration of the returned clip is automatically computed as +duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
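For instance (following the generic clip variable used in the other examples):
>>> # skip the segment between t=5s and t=10s; duration shrinks by 5s
+>>> newclip = clip.cutout(5, 10)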
+
+fl(self, fun, apply_to=None, keep_duration=True)
+

General processing of a clip.

+

Returns a new Clip whose frames are a transformation +(through function fun) of the frames of the current clip.

Parameters:
+
fun
+

A function with signature (gf,t -> frame) where gf will +represent the current clip’s get_frame method, +i.e. gf is a function (t->image). Parameter t is a time +in seconds, frame is a picture (=Numpy array) which will be +returned by the transformed clip (see examples below).

+
+
apply_to
+

Can be either 'mask', or 'audio', or +['mask','audio']. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration
+

Set to True if the transformation does not change the +duration of the clip.

+
+
+
+

Examples

+

In the following example, newclip is a 100-pixel-high clip whose video +content scrolls from the top to the bottom of the frames of +clip.

+
>>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :]
+>>> newclip = clip.fl(fl, apply_to='mask')
+
+
+
+ +
+
+fl_time(self, t_func, apply_to=None, keep_duration=False)
+

Returns a Clip instance playing the content of the current clip +but with a modified timeline, time t being replaced by another +time t_func(t).

Parameters:
+
t_func:
+

A function t-> new_t

+
+
apply_to:
+

Can be either ‘mask’, or ‘audio’, or [‘mask’,’audio’]. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration:
+

False (default) if the transformation modifies the +duration of the clip.

+
+
+
+

Examples

+
>>> # plays the clip (and its mask and sound) twice as fast
+>>> newclip = clip.fl_time(lambda t: 2*t, apply_to=['mask', 'audio'])
+>>>
+>>> # plays the clip starting at t=3, and backwards:
+>>> newclip = clip.fl_time(lambda t: 3-t)
+
+
+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs). +For instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> mirrorx( resize( volumex( clip, 0.5 ), 0.3 ) )
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t, +or the (mono or stereo) value for a sound clip.

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns true if t is between the start and +the end of the clip. t can be expressed in seconds (15.35), +in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. +If t is a numpy array, returns False if none of the times in t is in +the clip, else returns a vector [b_1, b_2, b_3…] where b_i +is True iff t_i is in the clip.

+
+ +
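For instance (the clip name is generic, as in the other examples):
>>> import numpy as np
+>>> clip.is_playing(2)                         # True if start <= 2 < end
+>>> clip.is_playing(np.array([0.5, 2, 99]))    # boolean vector, or False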
+
+iter_chunks(self, chunksize=None, chunk_duration=None, fps=None, quantize=False, nbytes=2, logger=None)
+

Iterator that returns the whole sound array of the clip by chunks

+
+ +
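For instance, a sketch scanning the sound chunk by chunk for its peak level (snd is any AudioClip; the fps and chunk_duration values are illustrative):
>>> peaks = [abs(chunk).max()
+             for chunk in snd.iter_chunks(fps=44100, chunk_duration=1.0)]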
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as an HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute +set to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, of the +returned clip. +If change_end is False, the start attribute of the clip will +be adjusted according to the duration and the preset end +of the clip.

+
+ +
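For instance:
>>> clip10 = clip.set_duration(10)                      # end becomes start + 10
+>>> clip10b = clip.set_duration(10, change_end=False)  # start is moved instead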
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like +write_videofile, iter_frames, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Says whether the clip is a mask or not (ismask is a boolean).

+
+ +
+
+set_make_frame(self, make_frame)
+

Sets a make_frame attribute for the clip. Useful for setting +arbitrary/complicated videoclips.

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory.

+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, +the end attribute of the clip will be updated to +start+duration.

+

If change_end=False and the clip has an end attribute, +the duration attribute of the clip will be updated to +end-start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip +between times t_start and t_end, which can be expressed +in seconds (15.35), in (min, sec), in (hour, min, sec), or as a +string: ‘01:03:05.35’. +If t_end is not provided, it is assumed to be the duration +of the clip (potentially infinite). +If t_end is a negative value, it is reset to +clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be +subclips of the mask and audio of the original clip, if +they exist.

+
+ +
+
+to_audiofile(*a, **kw)
+

The function to_audiofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_audiofile, instead.

+
+ +
+
+to_soundarray(self, tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000)
+

Transforms the sound into an array that can be played by pygame +or written in a wav file. See AudioClip.preview.

Parameters:
+
fps
+

Frame rate of the sound for the conversion. +44100 for top quality.

+
+
nbytes
+

Number of bytes to encode the sound: 1 for 8bit sound, +2 for 16bit, 4 for 32bit sound.

+
+
+
+
+ +
+
+write_audiofile(self, filename, fps=None, nbytes=2, buffersize=2000, codec=None, bitrate=None, ffmpeg_params=None, write_logfile=False, verbose=True, logger='bar')
+

Writes an audio file from the AudioClip.

Parameters:
+
filename
+

Name of the output file

+
+
fps
+

Frames per second. If not set, it will default to self.fps if +already set, otherwise it will default to 44100.

+
+
nbytes
+

Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound)

+
+
codec
+

Which audio codec should be used. If None provided, the codec is +determined based on the extension of the filename. Choose +‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav.

+
+
bitrate
+

Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. +Will determine the size and quality of the output file. +Note that it is mainly an indicative goal; the actual bitrate won’t +necessarily match this value in the output file.

+
+
ffmpeg_params
+

Any additional parameters you would like to pass, as a list +of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’]

+
+
write_logfile
+

If true, produces a detailed logfile named filename + ‘.log’ +when writing the file

+
+
verbose
+

Boolean indicating whether to print information.

+
+
logger
+

Either ‘bar’ or None or any Proglog logger

+
+
+
+
+ +
+ +
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/Clip.html b/v1.0.3/ref/Clip.html new file mode 100644 index 000000000..e647e4abf --- /dev/null +++ b/v1.0.3/ref/Clip.html @@ -0,0 +1,531 @@ + + + + + + + + + + Clip — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Clip

+
+

Clip

+
+
+class moviepy.Clip.Clip[source]
+

Bases: object

+

Base class of all clips (VideoClips and AudioClips).

Attributes:
+
start:
+

When the clip is included in a composition, time of the +composition at which the clip starts playing (in seconds).

+
+
end:
+

When the clip is included in a composition, time of the +composition at which the clip stops playing (in seconds).

+
+
duration:
+

Duration of the clip (in seconds). Some clips are infinite, in +which case their duration will be None.

+
+
+
+
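For instance, a small sketch making the three attributes concrete (the file name is an assumption):
>>> from moviepy.editor import AudioFileClip
+>>> c = AudioFileClip("sound.mp3").set_start(2)
+>>> c.start, c.duration, c.end   # e.g. (2, 7.5, 9.5) for a 7.5s file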
+
+close(self)[source]
+

Release any resources that are in use.

+
+ +
+
+copy(self)[source]
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)[source]
+

Returns a clip playing the content of the current clip but +skips the extract between ta and tb, which can be +expressed in seconds (15.35), in (min, sec), in (hour, min, sec), +or as a string: ‘01:03:05.35’. +If the original clip has a duration attribute set, +the duration of the returned clip is automatically computed as +duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
+
+fl(self, fun, apply_to=None, keep_duration=True)[source]
+

General processing of a clip.

+

Returns a new Clip whose frames are a transformation +(through function fun) of the frames of the current clip.

Parameters:
+
fun
+

A function with signature (gf,t -> frame) where gf will +represent the current clip’s get_frame method, +i.e. gf is a function (t->image). Parameter t is a time +in seconds, frame is a picture (=Numpy array) which will be +returned by the transformed clip (see examples below).

+
+
apply_to
+

Can be either 'mask', or 'audio', or +['mask','audio']. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration
+

Set to True if the transformation does not change the +duration of the clip.

+
+
+
+

Examples

+

In the following example, newclip is a 100-pixel-high clip whose video +content scrolls from the top to the bottom of the frames of +clip.

+
>>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :]
+>>> newclip = clip.fl(fl, apply_to='mask')
+
+
+
+ +
+
+fl_time(self, t_func, apply_to=None, keep_duration=False)[source]
+

Returns a Clip instance playing the content of the current clip +but with a modified timeline, time t being replaced by another +time t_func(t).

Parameters:
+
t_func:
+

A function t-> new_t

+
+
apply_to:
+

Can be either ‘mask’, or ‘audio’, or [‘mask’,’audio’]. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration:
+

False (default) if the transformation modifies the +duration of the clip.

+
+
+
+

Examples

+
>>> # plays the clip (and its mask and sound) twice as fast
+>>> newclip = clip.fl_time(lambda t: 2*t, apply_to=['mask', 'audio'])
+>>>
+>>> # plays the clip starting at t=3, and backwards:
+>>> newclip = clip.fl_time(lambda t: 3-t)
+
+
+
+ +
+
+fx(self, func, *args, **kwargs)[source]
+

Returns the result of func(self, *args, **kwargs). +For instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> mirrorx( resize( volumex( clip, 0.5 ), 0.3 ) )
+
+
+
+ +
+
+get_frame(self, t)[source]
+

Gets a numpy array representing the RGB picture of the clip at time t, +or the (mono or stereo) value for a sound clip.

+
+ +
+
+is_playing(self, t)[source]
+

If t is a time, returns true if t is between the start and +the end of the clip. t can be expressed in seconds (15.35), +in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. +If t is a numpy array, returns False if none of the times in t is in +the clip, else returns a vector [b_1, b_2, b_3…] where b_i +is True iff t_i is in the clip.

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)[source]
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as an HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+set_duration(self, t, change_end=True)[source]
+

Returns a copy of the clip, with the duration attribute +set to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, of the +returned clip. +If change_end is False, the start attribute of the clip will +be adjusted according to the duration and the preset end +of the clip.

+
+ +
+
+set_end(self, t)[source]
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)[source]
+

Returns a copy of the clip with a new default fps for functions like +write_videofile, iter_frames, etc.

+
+ +
+
+set_ismask(self, ismask)[source]
+

Says whether the clip is a mask or not (ismask is a boolean).

+
+ +
+
+set_make_frame(self, make_frame)[source]
+

Sets a make_frame attribute for the clip. Useful for setting +arbitrary/complicated videoclips.

+
+ +
+
+set_memoize(self, memoize)[source]
+

Sets whether the clip should keep the last frame read in memory.

+
+ +
+
+set_start(self, t, change_end=True)[source]
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, +the end attribute of the clip will be updated to +start+duration.

+

If change_end=False and the clip has an end attribute, +the duration attribute of the clip will be updated to +end-start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)[source]
+

Returns a clip playing the content of the current clip +between times t_start and t_end, which can be expressed +in seconds (15.35), in (min, sec), in (hour, min, sec), or as a +string: ‘01:03:05.35’. +If t_end is not provided, it is assumed to be the duration +of the clip (potentially infinite). +If t_end is a negative value, it is reset to +clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be +subclips of the mask and audio of the original clip, if +they exist.

+
+ +
+ +
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/VideoClip/VideoClip.html b/v1.0.3/ref/VideoClip/VideoClip.html new file mode 100644 index 000000000..88e1953fb --- /dev/null +++ b/v1.0.3/ref/VideoClip/VideoClip.html @@ -0,0 +1,4429 @@ + + + + + + + + + + Classes of Video Clips — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Classes of Video Clips

+
+

VideoClip

+
+
+class moviepy.video.VideoClip.VideoClip(make_frame=None, ismask=False, duration=None, has_constant_size=True)[source]
+

Bases: moviepy.Clip.Clip

+

Base class for video clips.

+

See VideoFileClip, ImageClip etc. for more user-friendly +classes.

Parameters:
+
ismask
+

True if the clip is going to be used as a mask.

+
+
+
Attributes:
+
size
+

The size of the clip, (width, height), in pixels.

+
+
w, h
+

The width and height of the clip, in pixels.

+
+
ismask
+

Boolean set to True if the clip is a mask.

+
+
make_frame
+

A function t-> frame at time t where frame is a +w*h*3 RGB array.

+
+
mask (default None)
+
+
VideoClip mask attached to this clip. If mask is None,
+

The video clip is fully opaque.

+
+
+
+
audio (default None)
+

An AudioClip instance containing the audio of the video clip.

+
+
pos
+

A function t->(x,y) where x,y is the position +of the clip when it is composed with other clips. +See VideoClip.set_pos for more details

+
+
relative_pos
+

See variable pos.

+
+
+
+
+
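For instance, a minimal sketch of a VideoClip built directly from a make_frame function (a solid red frame, purely illustrative):
>>> import numpy as np
+>>> from moviepy.video.VideoClip import VideoClip
+>>> red = np.tile(np.uint8([255, 0, 0]), (64, 64, 1))  # 64x64x3 frame
+>>> clip = VideoClip(lambda t: red, duration=2)        # 2s of solid red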
+add_mask(self)[source]
+

Add a mask VideoClip to the VideoClip.

+

Returns a copy of the clip with a completely opaque mask +(made of ones). This makes computations slower compared to +having a None mask but can be useful in many cases.

+

Set constant_size to False for clips whose frames change +size over time.

+
+ +
+
+afx(self, fun, *a, **k)[source]
+

Transform the clip’s audio.

+

Return a new clip whose audio has been transformed by fun.

+
+ +
+
+blit_on(self, picture, t)[source]
+

Returns the result of the blit of the clip’s frame at time t +on the given picture, the position of the clip being given +by the clip’s pos attribute. Meant for compositing.

+
+ +
+
+close(self)
+

Release any resources that are in use.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but +skips the extract between ta and tb, which can be +expressed in seconds (15.35), in (min, sec), in (hour, min, sec), +or as a string: ‘01:03:05.35’. +If the original clip has a duration attribute set, +the duration of the returned clip is automatically computed as +duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
+
+fl(self, fun, apply_to=None, keep_duration=True)
+

General processing of a clip.

+

Returns a new Clip whose frames are a transformation +(through function fun) of the frames of the current clip.

Parameters:
+
fun
+

A function with signature (gf,t -> frame) where gf will +represent the current clip’s get_frame method, +i.e. gf is a function (t->image). Parameter t is a time +in seconds, frame is a picture (=Numpy array) which will be +returned by the transformed clip (see examples below).

+
+
apply_to
+

Can be either 'mask', or 'audio', or +['mask','audio']. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration
+

Set to True if the transformation does not change the +duration of the clip.

+
+
+
+

Examples

+

In the following example, newclip is a 100-pixel-high clip whose video +content scrolls from the top to the bottom of the frames of +clip.

+
>>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :]
+>>> newclip = clip.fl(fl, apply_to='mask')
+
+
+
+ +
+
+fl_image(self, image_func, apply_to=None)[source]
+

Modifies the images of a clip by replacing the frame +get_frame(t) by another frame, image_func(get_frame(t))

+
+ +
+
+fl_time(self, t_func, apply_to=None, keep_duration=False)
+

Returns a Clip instance playing the content of the current clip +but with a modified timeline, time t being replaced by another +time t_func(t).

Parameters:
+
t_func:
+

A function t-> new_t

+
+
apply_to:
+

Can be either ‘mask’, or ‘audio’, or [‘mask’,’audio’]. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration:
+

False (default) if the transformation modifies the +duration of the clip.

+
+
+
+

Examples

+
>>> # plays the clip (and its mask and sound) twice as fast
+>>> newclip = clip.fl_time(lambda t: 2*t, apply_to=['mask', 'audio'])
+>>>
+>>> # plays the clip starting at t=3, and backwards:
+>>> newclip = clip.fl_time(lambda t: 3-t)
+
+
+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs). +For instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> mirrorx( resize( volumex( clip, 0.5 ), 0.3 ) )
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t, +or the (mono or stereo) value for a sound clip.

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns true if t is between the start and +the end of the clip. t can be expressed in seconds (15.35), +in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. +If t is a numpy array, returns False if none of the times in t is in +the clip, else returns a vector [b_1, b_2, b_3…] where b_i +is True iff t_i is in the clip.

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as an HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None)[source]
+

Place the clip on a colored background.

+

Returns a clip made of the current clip overlaid on a color +clip of a possibly bigger size. Can serve to flatten transparent +clips.

Parameters:
+
size
+

Size (width, height) in pixels of the final clip. +By default it will be the size of the current clip.

+
+
color
+

Background color of the final clip ([R,G,B]).

+
+
pos
+

Position of the clip in the final clip. ‘center’ is the default

+
+
col_opacity
+

Parameter in 0..1 indicating the opacity of the colored +background.

+
+
+
+
+ +
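For instance:
>>> # center the clip on a 1280x720 white background
+>>> flat = clip.on_color(size=(1280, 720), color=(255, 255, 255), pos='center')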
+
+save_frame(self, filename, t=0, withmask=True)[source]
+

Save a clip’s frame to an image file.

+

Saves the frame of clip corresponding to time t in +‘filename’. t can be expressed in seconds (15.35), in +(min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If withmask is True the mask is saved in +the alpha layer of the picture (only works with PNGs).

+
+ +
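For instance:
>>> clip.save_frame("frame.png", t=2)                   # with mask, if any
+>>> clip.save_frame("frame.jpeg", t=2, withmask=False)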
+
+set_audio(self, audioclip)[source]
+

Attach an AudioClip to the VideoClip.

+

Returns a copy of the VideoClip instance, with the audio +attribute set to audio, which must be an AudioClip instance.

+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute +set to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, of the +returned clip. +If change_end is False, the start attribute of the clip will +be adjusted according to the duration and the preset end +of the clip.

+
+ +
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like +write_videofile, iter_frames, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Says whether the clip is a mask or not (ismask is a boolean).

+
+ +
+
+set_make_frame(self, mf)[source]
+

Change the clip’s get_frame.

+

Returns a copy of the VideoClip instance, with the make_frame +attribute set to mf.

+
+ +
+
+set_mask(self, mask)[source]
+

Set the clip’s mask.

+

Returns a copy of the VideoClip with the mask attribute set to +mask, which must be a greyscale (values in 0-1) VideoClip

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory.

+
+ +
+
+set_opacity(self, op)[source]
+

Set the opacity/transparency level of the clip.

+

Returns a semi-transparent copy of the clip where the mask is +multiplied by op (any float, normally between 0 and 1).

+
+ +
+
+set_pos(*a, **kw)
+

The function set_pos is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, set_position, instead.

+
+ +
+
+set_position(self, pos, relative=False)[source]
+

Set the clip’s position in compositions.

+

Sets the position that the clip will have when included +in compositions. The argument pos can be either a couple +(x,y) or a function t-> (x,y). x and y mark the +location of the top left corner of the clip, and can be +of several types.

+

Examples

+
>>> clip.set_position((45,150)) # x=45, y=150
+>>>
+>>> # clip horizontally centered, at the top of the picture
+>>> clip.set_position(("center","top"))
+>>>
+>>> # clip is at 40% of the width, 70% of the height:
+>>> clip.set_position((0.4,0.7), relative=True)
+>>>
+>>> # clip's position is horizontally centered, and moving up !
+>>> clip.set_position(lambda t: ('center', 50+t) )
+
+
+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, +the end attribute of the clip will be updated to +start+duration.

+

If change_end=False and the clip has an end attribute, +the duration attribute of the clip will be updated to +end-start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip +between times t_start and t_end, which can be expressed +in seconds (15.35), in (min, sec), in (hour, min, sec), or as a +string: ‘01:03:05.35’. +If t_end is not provided, it is assumed to be the duration +of the clip (potentially infinite). +If t_end is a negative value, it is reset to +clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be +subclips of the mask and audio of the original clip, if +they exist.

+
+ +
+
+subfx(self, fx, ta=0, tb=None, **kwargs)[source]
+

Apply a transformation to a part of the clip.

+

Returns a new clip in which the function fun (clip->clip) +has been applied to the subclip between times ta and tb +(in seconds).

+

Examples

+
>>> # The scene between times t=3s and t=6s in ``clip`` will
+>>> # be played at half speed in ``newclip``
+>>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)
+
+
+
+ +
+
+to_ImageClip(self, t=0, with_mask=True, duration=None)[source]
+

Returns an ImageClip made out of the clip’s frame at time t, +which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+
+ +
+
+to_RGB(self)[source]
+

Return a non-mask video clip made from the mask video clip.

+
+ +
+
+to_gif(*a, **kw)
+

The function to_gif is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_gif, instead.

+
+ +
+
+to_images_sequence(*a, **kw)
+

The function to_images_sequence is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_images_sequence, instead.

+
+ +
+
+to_mask(self, canal=0)[source]
+

Return a mask video clip made from the clip.

+
+ +
+
+to_videofile(*a, **kw)
+

The function to_videofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_videofile, instead.

+
+ +
+
+without_audio(self)[source]
+

Remove the clip’s audio.

+

Return a copy of the clip with audio set to None.

+
+ +
+
+write_gif(self, filename, fps=None, program='imageio', opt='nq', fuzz=1, verbose=True, loop=0, dispose=False, colors=None, tempfiles=False, logger='bar')[source]
+

Write the VideoClip to a GIF file.

+

Converts a VideoClip into an animated GIF using ImageMagick +or ffmpeg.

Parameters:
+
filename
+

Name of the resulting gif file.

+
+
fps
+

Number of frames per second (see note below). If it +isn’t provided, then the function will look for the clip’s +fps attribute (VideoFileClip, for instance, has one).

+
+
program
+

Software to use for the conversion, either ‘imageio’ (this will use +the library FreeImage through ImageIO), or ‘ImageMagick’, or ‘ffmpeg’.

+
+
opt
+

Optimization to apply. If program=’imageio’, opt must be either ‘wu’ +(Wu) or ‘nq’ (Neuquant). If program=’ImageMagick’, +either ‘optimizeplus’ or ‘OptimizeTransparency’.

+
+
fuzz
+

(ImageMagick only) Compresses the GIF by considering that +the colors that are less than fuzz% different are in fact +the same.

+
+
tempfiles
+

Writes every frame to a file instead of keeping them in RAM. +Useful on computers with little RAM. Can only be used with +‘ImageMagick’ or ‘ffmpeg’.

+
+
progress_bar
+

If True, displays a progress bar

+
+
+
+

Notes

+

The gif will play the clip in real time (you can +only change the frame rate). If you want the gif to play +slower than the clip, you can use:

+
>>> # slow down clip 50% and make it a gif
+>>> myClip.speedx(0.5).write_gif('myClip.gif')
+
+
+
+ +
+
+write_images_sequence(self, nameformat, fps=None, verbose=True, withmask=True, logger='bar')[source]
+

Writes the videoclip to a sequence of image files.

Parameters:
+
nameformat
+

A filename specifying the numbering format and extension +of the pictures. For instance “frame%03d.png” for filenames +indexed with 3 digits and PNG format. Also possible: +“some_folder/frame%04d.jpeg”, etc.

+
+
fps
+

Number of frames per second to consider when writing the +clip. If not specified, the clip’s fps attribute will +be used if it has one.

+
+
withmask
+

will save the clip’s mask (if any) as an alpha channel (PNGs only).

+
+
verbose
+

Boolean indicating whether to print information.

+
+
logger
+

Either ‘bar’ (progress bar) or None or any Proglog logger.

+
+
+
Returns:
+
names_list
+

A list of all the files generated.

+
+
+
+

Notes

+

The resulting image sequence can be read using e.g. the class +ImageSequenceClip.

+
+ +
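For instance, a minimal sketch (the folder ‘frames’ is assumed to exist):
>>> names = myclip.write_images_sequence("frames/frame%04d.png", fps=24)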
+
+write_videofile(self, filename, fps=None, codec=None, bitrate=None, audio=True, audio_fps=44100, preset='medium', audio_nbytes=4, audio_codec=None, audio_bitrate=None, audio_bufsize=2000, temp_audiofile=None, rewrite_audio=True, remove_temp=True, write_logfile=False, verbose=True, threads=None, ffmpeg_params=None, logger='bar')[source]
+

Write the clip to a videofile.

Parameters:
+
filename
+

Name of the video file to write in. +The extension must correspond to the “codec” used (see below), +or simply be ‘.avi’ (which will work with any codec).

+
+
fps
+

Number of frames per second in the resulting video file. If None is +provided, and the clip has an fps attribute, this fps will be used.

+
+
codec
+

Codec to use for image encoding. Can be any codec supported +by ffmpeg. If the filename has extension ‘.mp4’, ‘.ogv’, ‘.webm’, +the codec will be set accordingly, but you can still set it if you +don’t like the default. For other extensions, the output filename +must be set accordingly.

+

Some examples of codecs are:

+

'libx264' (default codec for file extension .mp4) +makes well-compressed videos (quality tunable using ‘bitrate’).

+

'mpeg4' (other codec for extension .mp4) can be an alternative +to 'libx264', and produces higher quality videos by default.

+

'rawvideo' (use file extension .avi) will produce +a video of perfect quality, of possibly very huge size.

+

'png' (use file extension .avi) will produce a video +of perfect quality, of smaller size than with rawvideo.

+

'libvorbis' (use file extension .ogv) is a nice video +format, which is completely free/ open source. However not +everyone has the codecs installed by default on their machine.

+

'libvpx' (use file extension .webm) is a tiny video +format well suited for web videos (with HTML5). Open source.

+
+
audio
+

Either True, False, or a file name. +If True and the clip has an audio clip attached, this +audio clip will be incorporated as a soundtrack in the movie. +If audio is the name of an audio file, this audio file +will be incorporated as a soundtrack in the movie.

+
+
audio_fps
+

frame rate to use when generating the sound.

+
+
temp_audiofile
+

the name of the temporary audiofile to be generated and +incorporated in the movie, if any.

+
+
audio_codec
+

Which audio codec should be used. Examples are ‘libmp3lame’ +for ‘.mp3’, ‘libvorbis’ for ‘ogg’, ‘libfdk_aac’:’m4a’, +‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav. +Default is ‘libmp3lame’, unless the video extension is ‘ogv’ +or ‘webm’, in which case the default is ‘libvorbis’.

+
+
audio_bitrate
+

Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. +Will determine the size/quality of audio in the output file. +Note that it is mainly an indicative goal; the actual bitrate won’t +necessarily match this value in the final file.

+
+
preset
+

Sets the time that FFMPEG will spend optimizing the compression. +Choices are: ultrafast, superfast, veryfast, faster, fast, medium, +slow, slower, veryslow, placebo. Note that this does not impact +the quality of the video, only the size of the video file. So +choose ultrafast when you are in a hurry and file size does not +matter.

+
+
threads
+

Number of threads to use for ffmpeg. Can speed up the writing of +the video on multicore computers.

+
+
ffmpeg_params
+

Any additional ffmpeg parameters you would like to pass, as a list +of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’].

+
+
write_logfile
+

If true, will write log files for the audio and the video. +These will be files ending with ‘.log’ with the name of the +output file in them.

+
+
logger
+

Either “bar” for progress bar or None or any Proglog logger.

+
+
verbose (deprecated, kept for compatibility)
+

Formerly used for toggling messages on/off. Use logger=None now.

+
+
+
+

Examples

+
>>> from moviepy.editor import VideoFileClip
+>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
+>>> clip.write_videofile("my_new_video.mp4")
+>>> clip.close()
+
+
+
+ +
+ +
+

VideoFileClip

+
+
+class moviepy.video.io.VideoFileClip.VideoFileClip(filename, has_mask=False, audio=True, audio_buffersize=200000, target_resolution=None, resize_algorithm='bicubic', audio_fps=44100, audio_nbytes=2, verbose=False, fps_source='tbr')[source]
+

Bases: moviepy.video.VideoClip.VideoClip

+

A video clip originating from a movie file. For instance:

+
>>> clip = VideoFileClip("myHolidays.mp4")
+>>> clip.close()
+>>> with VideoFileClip("myMaskVideo.avi") as clip2:
+>>>    pass  # Implicit close called by context manager.
+
+
Parameters:
+
filename:
+

The name of the video file. It can have any extension supported +by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov etc.

+
+
has_mask:
+

Set this to ‘True’ if there is a mask included in the videofile. +Video files rarely contain masks, but some video codecs enable +that. For instance if you have a MoviePy VideoClip with a mask you +can save it to a videofile with a mask. (see also +VideoClip.write_videofile for more details).

+
+
audio:
+

Set to False if the clip doesn’t have any audio or if you do not +wish to read the audio.

+
+
target_resolution:
+

Set to (desired_height, desired_width) to have ffmpeg resize the frames +before returning them. This is much faster than streaming in high-res +and then resizing. If either dimension is None, the frames are resized +by keeping the existing aspect ratio.

+
+
resize_algorithm:
+

The algorithm used for resizing. Default: “bicubic”, other popular +options include “bilinear” and “fast_bilinear”. For more information, see +https://ffmpeg.org/ffmpeg-scaler.html

+
+
fps_source:
+

The fps value to collect from the metadata. Set by default to ‘tbr’, but +can be set to ‘fps’, which may be helpful if importing slow-motion videos +that get messed up otherwise.

+
+
+
Attributes:
+
filename:
+

Name of the original video file.

+
+
fps:
+

Frames per second in the original file.

+
+
Read docs for Clip() and VideoClip() for other, more generic, attributes.
+
+
+
+
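For instance, a sketch of the target_resolution option (the file name is an assumption):
>>> from moviepy.editor import VideoFileClip
+>>> small = VideoFileClip("myHolidays.mp4",
+                          target_resolution=(360, None))  # 360px high, keep ratio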
+
+add_mask(self)
+

Add a mask VideoClip to the VideoClip.

+

Returns a copy of the clip with a completely opaque mask +(made of ones). This makes computations slower compared to +having a None mask but can be useful in many cases.

+

Set constant_size to False for clips whose frames change +size over time.

+
+ +
+
+afx(self, fun, *a, **k)
+

Transform the clip’s audio.

+

Return a new clip whose audio has been transformed by fun.

+
+ +
+
+blit_on(self, picture, t)
+

Returns the result of the blit of the clip’s frame at time t +on the given picture, the position of the clip being given +by the clip’s pos attribute. Meant for compositing.

+
+ +
+
+close(self)[source]
+

Close the internal reader.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but +skips the extract between ta and tb, which can be +expressed in seconds (15.35), in (min, sec), in (hour, min, sec), +or as a string: ‘01:03:05.35’. +If the original clip has a duration attribute set, +the duration of the returned clip is automatically computed as +duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
+
+fl(self, fun, apply_to=None, keep_duration=True)
+

General processing of a clip.

+

Returns a new Clip whose frames are a transformation +(through function fun) of the frames of the current clip.

Parameters:
+
fun
+

A function with signature (gf,t -> frame) where gf will +represent the current clip’s get_frame method, +i.e. gf is a function (t->image). Parameter t is a time +in seconds, frame is a picture (=Numpy array) which will be +returned by the transformed clip (see examples below).

+
+
apply_to
+

Can be either 'mask', or 'audio', or +['mask','audio']. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration
+

Set to True if the transformation does not change the +duration of the clip.

+
+
+
+

Examples

+

In the following example, newclip is a 100-pixel-high clip whose video +content scrolls from the top to the bottom of the frames of +clip.

+
>>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :]
+>>> newclip = clip.fl(fl, apply_to='mask')
+
+
+
+ +
+
+fl_image(self, image_func, apply_to=None)
+

Modifies the images of a clip by replacing the frame +get_frame(t) by another frame, image_func(get_frame(t))

+
+ +
+
+fl_time(self, t_func, apply_to=None, keep_duration=False)
+

Returns a Clip instance playing the content of the current clip +but with a modified timeline, time t being replaced by another +time t_func(t).

Parameters:
+
t_func:
+

A function t-> new_t

+
+
apply_to:
+

Can be either ‘mask’, or ‘audio’, or [‘mask’,’audio’]. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration:
+

False (default) if the transformation modifies the +duration of the clip.

+
+
+
+

Examples

+
>>> # plays the clip (and its mask and sound) twice as fast
+>>> newclip = clip.fl_time(lambda t: 2*t, apply_to=['mask', 'audio'])
+>>>
+>>> # plays the clip starting at t=3, and backwards:
+>>> newclip = clip.fl_time(lambda t: 3-t)
+
+
+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs). +For instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> mirrorx( resize( volumex( clip, 0.5 ), 0.3 ) )
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t, +or the (mono or stereo) value for a sound clip.

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns true if t is between the start and +the end of the clip. t can be expressed in seconds (15.35), +in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. +If t is a numpy array, returns False if none of the times in t is in +the clip, else returns a vector [b_1, b_2, b_3…] where b_i +is True iff t_i is in the clip.

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as an HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None)
+

Place the clip on a colored background.

+

Returns a clip made of the current clip overlaid on a color +clip of a possibly bigger size. Can serve to flatten transparent +clips.

Parameters:
+
size
+

Size (width, height) in pixels of the final clip. +By default it will be the size of the current clip.

+
+
color
+

Background color of the final clip ([R,G,B]).

+
+
pos
+

Position of the clip in the final clip. ‘center’ is the default

+
+
col_opacity
+

Parameter in 0..1 indicating the opacity of the colored +background.

+
+
+
+
+ +
+
+save_frame(self, filename, t=0, withmask=True)
+

Save a clip’s frame to an image file.

+

Saves the frame of clip corresponding to time t in +‘filename’. t can be expressed in seconds (15.35), in +(min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If withmask is True the mask is saved in +the alpha layer of the picture (only works with PNGs).

+
+ +
+
+set_audio(self, audioclip)
+

Attach an AudioClip to the VideoClip.

+

Returns a copy of the VideoClip instance, with the audio +attribute set to audio, which must be an AudioClip instance.

+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute +set to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, of the +returned clip. +If change_end is False, the start attribute of the clip will +be adjusted according to the duration and the preset end +of the clip.

+
+ +
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like +write_videofile, iter_frames, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Says whether the clip is a mask or not (ismask is a boolean).

+
+ +
+
+set_make_frame(self, mf)
+

Change the clip’s get_frame.

+

Returns a copy of the VideoClip instance, with the make_frame +attribute set to mf.

+
+ +
+
+set_mask(self, mask)
+

Set the clip’s mask.

+

Returns a copy of the VideoClip with the mask attribute set to +mask, which must be a greyscale (values in 0-1) VideoClip

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory.

+
+ +
+
+set_opacity(self, op)
+

Set the opacity/transparency level of the clip.

+

Returns a semi-transparent copy of the clip where the mask is +multiplied by op (any float, normally between 0 and 1).

+
+ +
+
+set_pos(*a, **kw)
+

The function set_pos is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, set_position, instead.

+
+ +
+
+set_position(self, pos, relative=False)
+

Set the clip’s position in compositions.

+

Sets the position that the clip will have when included +in compositions. The argument pos can be either a couple +(x,y) or a function t-> (x,y). x and y mark the +location of the top left corner of the clip, and can be +of several types.

+

Examples

+
>>> clip.set_position((45,150)) # x=45, y=150
+>>>
+>>> # clip horizontally centered, at the top of the picture
+>>> clip.set_position(("center","top"))
+>>>
+>>> # clip is at 40% of the width, 70% of the height:
+>>> clip.set_position((0.4,0.7), relative=True)
+>>>
+>>> # clip's position is horizontally centered, and moving up !
+>>> clip.set_position(lambda t: ('center', 50+t) )
+
+
+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, +the end attribute of the clip will be updated to +start+duration.

+

If change_end=False and the clip has an end attribute, +the duration attribute of the clip will be updated to +end-start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip +between times t_start and t_end, which can be expressed +in seconds (15.35), in (min, sec), in (hour, min, sec), or as a +string: ‘01:03:05.35’. +If t_end is not provided, it is assumed to be the duration +of the clip (potentially infinite). +If t_end is a negative value, it is reset to +clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be +subclips of the mask and audio of the original clip, if +they exist.

+
+ +
+
+subfx(self, fx, ta=0, tb=None, **kwargs)
+

Apply a transformation to a part of the clip.

+

Returns a new clip in which the function fun (clip->clip) +has been applied to the subclip between times ta and tb +(in seconds).

+

Examples

+
>>> # The scene between times t=3s and t=6s in ``clip`` will
+>>> # be played at half speed in ``newclip``
+>>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)
+
+
+
+ +
+
+to_ImageClip(self, t=0, with_mask=True, duration=None)
+

Returns an ImageClip made out of the clip’s frame at time t, +which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+
+ +
+
+to_RGB(self)
+

Return a non-mask video clip made from the mask video clip.

+
+ +
+
+to_gif(*a, **kw)
+

The function to_gif is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_gif, instead.

+
+ +
+
+to_images_sequence(*a, **kw)
+

The function to_images_sequence is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_images_sequence, instead.

+
+ +
+
+to_mask(self, canal=0)
+

Return a mask video clip made from the clip.

+
+ +
+
+to_videofile(*a, **kw)
+

The function to_videofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_videofile, instead.

+
+ +
+
+without_audio(self)
+

Remove the clip’s audio.

+

Return a copy of the clip with audio set to None.

+
+ +
+
+write_gif(self, filename, fps=None, program='imageio', opt='nq', fuzz=1, verbose=True, loop=0, dispose=False, colors=None, tempfiles=False, logger='bar')
+

Write the VideoClip to a GIF file.

+

Converts a VideoClip into an animated GIF using ImageMagick +or ffmpeg.

Parameters:
+
filename
+

Name of the resulting gif file.

+
+
fps
+

Number of frames per second (see note below). If it +isn’t provided, then the function will look for the clip’s +fps attribute (VideoFileClip, for instance, has one).

+
+
program
+

Software to use for the conversion, either ‘imageio’ (this will use +the library FreeImage through ImageIO), or ‘ImageMagick’, or ‘ffmpeg’.

+
+
opt
+

Optimization to apply. If program=’imageio’, opt must be either ‘wu’ +(Wu) or ‘nq’ (Neuquant). If program=’ImageMagick’, +either ‘optimizeplus’ or ‘OptimizeTransparency’.

+
+
fuzz
+

(ImageMagick only) Compresses the GIF by considering that +the colors that are less than fuzz% different are in fact +the same.

+
+
tempfiles
+

Writes every frame to a file instead of keeping them in RAM. +Useful on computers with little RAM. Can only be used with +‘ImageMagick’ or ‘ffmpeg’.

+
+
progress_bar
+

If True, displays a progress bar

+
+
+
+

Notes

+

The gif will play the clip in real time (you can +only change the frame rate). If you want the gif to play +slower than the clip, you can use:

+
>>> # slow down clip 50% and make it a gif
+>>> myClip.speedx(0.5).write_gif('myClip.gif')
+
+
+
+ +
+
+write_images_sequence(self, nameformat, fps=None, verbose=True, withmask=True, logger='bar')
+

Writes the videoclip to a sequence of image files.

Parameters:
+
nameformat
+

A filename specifying the numbering format and extension +of the pictures. For instance “frame%03d.png” for filenames +indexed with 3 digits and PNG format. Also possible: +“some_folder/frame%04d.jpeg”, etc.

+
+
fps
+

Number of frames per second to consider when writing the +clip. If not specified, the clip’s fps attribute will +be used if it has one.

+
+
withmask
+

will save the clip’s mask (if any) as an alpha channel (PNGs only).

+
+
verbose
+

Boolean indicating whether to print information.

+
+
logger
+

Either ‘bar’ (progress bar) or None or any Proglog logger.

+
+
+
Returns:
+
names_list
+

A list of all the files generated.

+
+
+
+

Notes

+

The resulting image sequence can be read using e.g. the class +ImageSequenceClip.

+
+ +
+
+write_videofile(self, filename, fps=None, codec=None, bitrate=None, audio=True, audio_fps=44100, preset='medium', audio_nbytes=4, audio_codec=None, audio_bitrate=None, audio_bufsize=2000, temp_audiofile=None, rewrite_audio=True, remove_temp=True, write_logfile=False, verbose=True, threads=None, ffmpeg_params=None, logger='bar')
+

Write the clip to a videofile.

Parameters:
+
filename
+

Name of the video file to write in. +The extension must correspond to the “codec” used (see below), +or simply be ‘.avi’ (which will work with any codec).

+
+
fps
+

Number of frames per second in the resulting video file. If None is +provided, and the clip has an fps attribute, this fps will be used.

+
+
codec
+

Codec to use for image encoding. Can be any codec supported +by ffmpeg. If the filename has extension ‘.mp4’, ‘.ogv’, ‘.webm’, +the codec will be set accordingly, but you can still set it if you +don’t like the default. For other extensions, the output filename +must be set accordingly.

+

Some examples of codecs are:

+

'libx264' (default codec for file extension .mp4) +makes well-compressed videos (quality tunable using ‘bitrate’).

+

'mpeg4' (other codec for extension .mp4) can be an alternative +to 'libx264', and produces higher quality videos by default.

+

'rawvideo' (use file extension .avi) will produce +a video of perfect quality, of possibly very huge size.

+

'png' (use file extension .avi) will produce a video +of perfect quality, of smaller size than with rawvideo.

+

'libvorbis' (use file extension .ogv) is a nice video +format, which is completely free/ open source. However not +everyone has the codecs installed by default on their machine.

+

'libvpx' (use file extension .webm) is a tiny video +format well suited for web videos (with HTML5). Open source.

+
+
audio
+

Either True, False, or a file name. +If True and the clip has an audio clip attached, this +audio clip will be incorporated as a soundtrack in the movie. +If audio is the name of an audio file, this audio file +will be incorporated as a soundtrack in the movie.

+
+
audio_fps
+

frame rate to use when generating the sound.

+
+
temp_audiofile
+

the name of the temporary audiofile to be generated and +incorporated in the movie, if any.

+
+
audio_codec
+

Which audio codec should be used. Examples are ‘libmp3lame’ +for ‘.mp3’, ‘libvorbis’ for ‘ogg’, ‘libfdk_aac’:’m4a’, +‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav. +Default is ‘libmp3lame’, unless the video extension is ‘ogv’ +or ‘webm’, in which case the default is ‘libvorbis’.

+
+
audio_bitrate
+

Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. Will determine the size/quality of audio in the output file. Note that it is mainly an indicative goal; the bitrate won’t necessarily be this value in the final file.

+
+
preset
+

Sets the time that FFMPEG will spend optimizing the compression. +Choices are: ultrafast, superfast, veryfast, faster, fast, medium, +slow, slower, veryslow, placebo. Note that this does not impact +the quality of the video, only the size of the video file. So +choose ultrafast when you are in a hurry and file size does not +matter.

+
+
threads
+

Number of threads to use for ffmpeg. Can speed up the writing of +the video on multicore computers.

+
+
ffmpeg_params
+

Any additional ffmpeg parameters you would like to pass, as a list +of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’].

+
+
write_logfile
+

If true, will write log files for the audio and the video. +These will be files ending with ‘.log’ with the name of the +output file in them.

+
+
logger
+

Either “bar” for progress bar or None or any Proglog logger.

+
+
verbose (deprecated, kept for compatibility)
+

Formerly used for toggling messages on/off. Use logger=None now.

+
+
+
+

Examples

+
>>> from moviepy.editor import VideoFileClip
+>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
+>>> clip.write_videofile("my_new_video.mp4")
+>>> clip.close()
+
+
+
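For non-default containers the codecs can be passed explicitly. A sketch following the codec notes above (file name hypothetical):
>>> clip.write_videofile("my_new_video.webm", codec='libvpx', audio_codec='libvorbis')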
+ +
+ +
+
+

ImageClip

+
+
+class moviepy.video.VideoClip.ImageClip(img, ismask=False, transparent=True, fromalpha=False, duration=None)[source]
+

Bases: moviepy.video.VideoClip.VideoClip

+

Class for non-moving VideoClips.

+

A video clip originating from a picture. This clip will simply +display the given picture at all times.

Parameters:
+
img
+

Any picture file (png, tiff, jpeg, etc.) or any array representing +an RGB image (for instance a frame from a VideoClip).

+
+
ismask
+

Set this parameter to True if the clip is a mask.

+
+
transparent
+

Set this parameter to True (default) if you want the alpha layer +of the picture (if it exists) to be used as a mask.

+
+
+
+

Examples

+
>>> clip = ImageClip("myHouse.jpeg")
+>>> clip = ImageClip( someArray )  # a Numpy array representing an image
+
+
Attributes:
+
img
+

Array representing the image of the clip.

+
+
+
+
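An ImageClip has no duration of its own, so one must be set before writing it out; likewise write_videofile needs an explicit fps. A minimal sketch (file names hypothetical):
>>> clip = ImageClip("myHouse.jpeg").set_duration(5)  # a 5-second still
>>> clip.write_videofile("house.mp4", fps=24)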
+
+add_mask(self)
+

Add a mask VideoClip to the VideoClip.

+

Returns a copy of the clip with a completely opaque mask (made of ones). This makes computations slower compared to having a None mask, but can be useful in many cases.

+

Set constant_size to False for clips with moving +image size.

+
+ +
+
+afx(self, fun, *a, **k)
+

Transform the clip’s audio.

+

Return a new clip whose audio has been transformed by fun.

+
+ +
+
+blit_on(self, picture, t)
+

Returns the result of the blit of the clip’s frame at time t +on the given picture, the position of the clip being given +by the clip’s pos attribute. Meant for compositing.

+
+ +
+
+close(self)
+

Release any resources that are in use.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but skipping the extract between ta and tb, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If the original clip has a duration attribute set, the duration of the returned clip is automatically computed as duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
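For instance, assuming the clip is at least 5 seconds long:
>>> # skip the segment between t=3s and t=5s
>>> newclip = clip.cutout(3, 5)  # newclip.duration == clip.duration - 2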
+ +
+
+fl(self, fl, apply_to=None, keep_duration=True)[source]
+

General transformation filter.

+

Equivalent to VideoClip.fl. The result is no longer an ImageClip; it has the class VideoClip (since it may be animated).

+
+ +
+
+fl_image(self, image_func, apply_to=None)[source]
+

Image-transformation filter.

+

Does the same as VideoClip.fl_image, but for ImageClip the transformed clip is computed once and for all at the beginning, and not for each ‘frame’.

+
+ +
+
+fl_time(self, time_func, apply_to=None, keep_duration=False)[source]
+

Time-transformation filter.

+

Applies a transformation to the clip’s timeline +(see Clip.fl_time).

+

This method does nothing for ImageClips (but it may affect their +masks or their audios). The result is still an ImageClip.

+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs). For instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> mirrorx( resize( volumex( clip, 0.5 ), 0.3 ) )
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t, or the (mono or stereo) value for a sound clip.

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns true if t is between the start and the end of the clip. t can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t is a numpy array, returns False if none of the times in t is in the clip, else returns a vector [b_1, b_2, b_3…] where b_i is true iff t_i is in the clip.

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as a HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None)
+

Place the clip on a colored background.

+

Returns a clip made of the current clip overlaid on a color +clip of a possibly bigger size. Can serve to flatten transparent +clips.

Parameters:
+
size
+

Size (width, height) in pixels of the final clip. +By default it will be the size of the current clip.

+
+
color
+

Background color of the final clip ([R,G,B]).

+
+
pos
+

Position of the clip in the final clip. ‘center’ is the default

+
+
col_opacity
+

Parameter in 0..1 indicating the opacity of the colored +background.

+
+
+
+
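A sketch flattening a transparent clip onto a white 1280x720 canvas:
>>> flat = clip.on_color(size=(1280, 720), color=(255, 255, 255), pos='center')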
+ +
+
+save_frame(self, filename, t=0, withmask=True)
+

Save a clip’s frame to an image file.

+

Saves the frame of clip corresponding to time t in +‘filename’. t can be expressed in seconds (15.35), in +(min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If withmask is True the mask is saved in +the alpha layer of the picture (only works with PNGs).

+
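For instance (file name hypothetical):
>>> myclip.save_frame("snapshot.png", t='00:00:01.50')  # frame at 1.5 seconds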
+ +
+
+set_audio(self, audioclip)
+

Attach an AudioClip to the VideoClip.

+

Returns a copy of the VideoClip instance, with the audio +attribute set to audio, which must be an AudioClip instance.

+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute set to t, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. Also sets the duration of the mask and audio, if any, of the returned clip. If change_end is False, the start attribute of the clip will be modified to match the new duration and the preset end of the clip.

+
+ +
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like write_videofile, iter_frames, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Sets whether the clip is a mask or not (ismask is a boolean).

+
+ +
+
+set_make_frame(self, mf)
+

Change the clip’s get_frame.

+

Returns a copy of the VideoClip instance, with the make_frame +attribute set to mf.

+
+ +
+
+set_mask(self, mask)
+

Set the clip’s mask.

+

Returns a copy of the VideoClip with the mask attribute set to +mask, which must be a greyscale (values in 0-1) VideoClip

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory.

+
+ +
+
+set_opacity(self, op)
+

Set the opacity/transparency level of the clip.

+

Returns a semi-transparent copy of the clip where the mask is +multiplied by op (any float, normally between 0 and 1).

+
+ +
+
+set_pos(*a, **kw)
+

The function set_pos is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, set_position, instead.

+
+ +
+
+set_position(self, pos, relative=False)
+

Set the clip’s position in compositions.

+

Sets the position that the clip will have when included +in compositions. The argument pos can be either a couple +(x,y) or a function t-> (x,y). x and y mark the +location of the top left corner of the clip, and can be +of several types.

+

Examples

+
>>> clip.set_position((45,150)) # x=45, y=150
+>>>
+>>> # clip horizontally centered, at the top of the picture
+>>> clip.set_position(("center","top"))
+>>>
+>>> # clip is at 40% of the width, 70% of the height:
+>>> clip.set_position((0.4,0.7), relative=True)
+>>>
+>>> # clip's position is horizontally centered, and moving up !
+>>> clip.set_position(lambda t: ('center', 50+t) )
+
+
+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, the end attribute of the clip will be updated to start+duration.

+

If change_end=False and the clip has an end attribute, the duration attribute of the clip will be updated to end-start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
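For instance, to schedule a clip inside a later composition (a sketch):
>>> newclip = clip.set_start(5)  # plays from t=5s in a composition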
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip between times t_start and t_end, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t_end is not provided, it is assumed to be the duration of the clip (potentially infinite). If t_end is a negative value, it is reset to clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be subclips of the mask and audio of the original clip, if they exist.

+
+ +
+
+subfx(self, fx, ta=0, tb=None, **kwargs)
+

Apply a transformation to a part of the clip.

+

Returns a new clip in which the function fx (clip->clip) has been applied to the subclip between times ta and tb (in seconds).

+

Examples

+
>>> # The scene between times t=3s and t=6s in ``clip`` will
+>>> # be played at half speed in ``newclip``
+>>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)
+
+
+
+ +
+
+to_ImageClip(self, t=0, with_mask=True, duration=None)
+

Returns an ImageClip made out of the clip’s frame at time t, +which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+
+ +
+
+to_RGB(self)
+

Return a non-mask video clip made from the mask video clip.

+
+ +
+
+to_gif(*a, **kw)
+

The function to_gif is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_gif, instead.

+
+ +
+
+to_images_sequence(*a, **kw)
+

The function to_images_sequence is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_images_sequence, instead.

+
+ +
+
+to_mask(self, canal=0)
+

Return a mask video clip made from the clip.

+
+ +
+
+to_videofile(*a, **kw)
+

The function to_videofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_videofile, instead.

+
+ +
+
+without_audio(self)
+

Remove the clip’s audio.

+

Return a copy of the clip with audio set to None.

+
+ +
+
+write_gif(self, filename, fps=None, program='imageio', opt='nq', fuzz=1, verbose=True, loop=0, dispose=False, colors=None, tempfiles=False, logger='bar')
+

Write the VideoClip to a GIF file.

+

Converts a VideoClip into an animated GIF using ImageMagick +or ffmpeg.

Parameters:
+
filename
+

Name of the resulting gif file.

+
+
fps
+

Number of frames per second (see note below). If it +isn’t provided, then the function will look for the clip’s +fps attribute (VideoFileClip, for instance, have one).

+
+
program
+

Software to use for the conversion, either ‘imageio’ (this will use +the library FreeImage through ImageIO), or ‘ImageMagick’, or ‘ffmpeg’.

+
+
opt
+

Optimization to apply. If program=’imageio’, opt must be either ‘wu’ (Wu) or ‘nq’ (Neuquant). If program=’ImageMagick’, either ‘optimizeplus’ or ‘OptimizeTransparency’.

+
+
fuzz
+

(ImageMagick only) Compresses the GIF by considering that +the colors that are less than fuzz% different are in fact +the same.

+
+
tempfiles
+

Writes every frame to a file instead of passing them in the RAM. Useful on computers with little RAM. Can only be used with ‘ImageMagick’ or ‘ffmpeg’.

+
+
progress_bar
+

If True, displays a progress bar

+
+
+
+

Notes

+

The gif will play the clip in real time (you can only change the frame rate). If you want the gif to play slower than the clip, you can use, for instance:

+
>>> # slow down clip 50% and make it a gif
+>>> myClip.speedx(0.5).write_gif('myClip.gif')
+
+
+
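A minimal sketch (file name hypothetical):
>>> myclip.write_gif("animation.gif", fps=10)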
+ +
+
+write_images_sequence(self, nameformat, fps=None, verbose=True, withmask=True, logger='bar')
+

Writes the videoclip to a sequence of image files.

Parameters:
+
nameformat
+

A filename specifying the numbering format and extension of the pictures. For instance “frame%03d.png” for filenames indexed with 3 digits and PNG format. Also possible: “some_folder/frame%04d.jpeg”, etc.

+
+
fps
+

Number of frames per second to consider when writing the +clip. If not specified, the clip’s fps attribute will +be used if it has one.

+
+
withmask
+

Will save the clip’s mask (if any) as an alpha channel (PNGs only).

+
+
verbose
+

Boolean indicating whether to print information.

+
+
logger
+

Either ‘bar’ (progress bar) or None or any Proglog logger.

+
+
+
Returns:
+
names_list
+

A list of all the files generated.

+
+
+
+

Notes

+

The resulting image sequence can be read using e.g. the class +ImageSequenceClip.

+
+ +
+
+write_videofile(self, filename, fps=None, codec=None, bitrate=None, audio=True, audio_fps=44100, preset='medium', audio_nbytes=4, audio_codec=None, audio_bitrate=None, audio_bufsize=2000, temp_audiofile=None, rewrite_audio=True, remove_temp=True, write_logfile=False, verbose=True, threads=None, ffmpeg_params=None, logger='bar')
+

Write the clip to a videofile.

Parameters:
+
filename
+

Name of the video file to write in. +The extension must correspond to the “codec” used (see below), +or simply be ‘.avi’ (which will work with any codec).

+
+
fps
+

Number of frames per second in the resulting video file. If None is +provided, and the clip has an fps attribute, this fps will be used.

+
+
codec
+

Codec to use for image encoding. Can be any codec supported by ffmpeg. If the filename has extension ‘.mp4’, ‘.ogv’ or ‘.webm’, the codec will be set accordingly, but you can still set it if you don’t like the default. For other extensions, the output filename must be set accordingly.

+

Some examples of codecs are:

+

'libx264' (default codec for file extension .mp4) +makes well-compressed videos (quality tunable using ‘bitrate’).

+

'mpeg4' (other codec for extension .mp4) can be an alternative +to 'libx264', and produces higher quality videos by default.

+

'rawvideo' (use file extension .avi) will produce +a video of perfect quality, of possibly very huge size.

+

png (use file extension .avi) will produce a video +of perfect quality, of smaller size than with rawvideo.

+

'libvorbis' (use file extension .ogv) is a nice video +format, which is completely free/ open source. However not +everyone has the codecs installed by default on their machine.

+

'libvpx' (use file extension .webm) is a tiny video format well suited for web videos (with HTML5). Open source.

+
+
audio
+

Either True, False, or a file name. +If True and the clip has an audio clip attached, this +audio clip will be incorporated as a soundtrack in the movie. +If audio is the name of an audio file, this audio file +will be incorporated as a soundtrack in the movie.

+
+
audiofps
+

frame rate to use when generating the sound.

+
+
temp_audiofile
+

The name of the temporary audiofile to be generated and incorporated in the movie, if any.

+
+
audio_codec
+

Which audio codec should be used. Examples are ‘libmp3lame’ for ‘.mp3’, ‘libvorbis’ for ‘.ogg’, ‘libfdk_aac’ for ‘.m4a’, ‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav. Default is ‘libmp3lame’, unless the video extension is ‘ogv’ or ‘webm’, in which case the default is ‘libvorbis’.

+
+
audio_bitrate
+

Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. Will determine the size/quality of audio in the output file. Note that it is mainly an indicative goal; the bitrate won’t necessarily be this value in the final file.

+
+
preset
+

Sets the time that FFMPEG will spend optimizing the compression. +Choices are: ultrafast, superfast, veryfast, faster, fast, medium, +slow, slower, veryslow, placebo. Note that this does not impact +the quality of the video, only the size of the video file. So +choose ultrafast when you are in a hurry and file size does not +matter.

+
+
threads
+

Number of threads to use for ffmpeg. Can speed up the writing of +the video on multicore computers.

+
+
ffmpeg_params
+

Any additional ffmpeg parameters you would like to pass, as a list +of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’].

+
+
write_logfile
+

If true, will write log files for the audio and the video. +These will be files ending with ‘.log’ with the name of the +output file in them.

+
+
logger
+

Either “bar” for progress bar or None or any Proglog logger.

+
+
verbose (deprecated, kept for compatibility)
+

Formerly used for toggling messages on/off. Use logger=None now.

+
+
+
+

Examples

+
>>> from moviepy.editor import VideoFileClip
+>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
+>>> clip.write_videofile("my_new_video.mp4")
+>>> clip.close()
+
+
+
+ +
+ +
+
+

ColorClip

+
+
+class moviepy.video.VideoClip.ColorClip(size, color=None, ismask=False, duration=None, col=None)[source]
+

Bases: moviepy.video.VideoClip.ImageClip

+

An ImageClip showing just one color.

Parameters:
+
size
+

Size (width, height) in pixels of the clip.

+
+
color
+

If argument ismask is False, color indicates the color in RGB of the clip (default is black). If ismask is True, color must be a float between 0 and 1 (default is 1).

+
+
ismask
+

Set to true if the clip will be used as a mask.

+
+
col
+

Has been deprecated. Do not use.

+
+
+
+
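For instance, a red background to composite other clips on (a sketch):
>>> bg = ColorClip(size=(640, 480), color=(255, 0, 0), duration=4)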
+
+add_mask(self)
+

Add a mask VideoClip to the VideoClip.

+

Returns a copy of the clip with a completely opaque mask (made of ones). This makes computations slower compared to having a None mask, but can be useful in many cases.

+

Set constant_size to False for clips with moving +image size.

+
+ +
+
+afx(self, fun, *a, **k)
+

Transform the clip’s audio.

+

Return a new clip whose audio has been transformed by fun.

+
+ +
+
+blit_on(self, picture, t)
+

Returns the result of the blit of the clip’s frame at time t +on the given picture, the position of the clip being given +by the clip’s pos attribute. Meant for compositing.

+
+ +
+
+close(self)
+

Release any resources that are in use.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but skipping the extract between ta and tb, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If the original clip has a duration attribute set, the duration of the returned clip is automatically computed as duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
+
+fl(self, fl, apply_to=None, keep_duration=True)
+

General transformation filter.

+

Equivalent to VideoClip.fl. The result is no longer an ImageClip; it has the class VideoClip (since it may be animated).

+
+ +
+
+fl_image(self, image_func, apply_to=None)
+

Image-transformation filter.

+

Does the same as VideoClip.fl_image, but for ImageClip the transformed clip is computed once and for all at the beginning, and not for each ‘frame’.

+
+ +
+
+fl_time(self, time_func, apply_to=None, keep_duration=False)
+

Time-transformation filter.

+

Applies a transformation to the clip’s timeline +(see Clip.fl_time).

+

This method does nothing for ImageClips (but it may affect their +masks or their audios). The result is still an ImageClip.

+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs). For instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> mirrorx( resize( volumex( clip, 0.5 ), 0.3 ) )
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t, or the (mono or stereo) value for a sound clip.

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns true if t is between the start and the end of the clip. t can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t is a numpy array, returns False if none of the times in t is in the clip, else returns a vector [b_1, b_2, b_3…] where b_i is true iff t_i is in the clip.

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as a HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None)
+

Place the clip on a colored background.

+

Returns a clip made of the current clip overlaid on a color +clip of a possibly bigger size. Can serve to flatten transparent +clips.

Parameters:
+
size
+

Size (width, height) in pixels of the final clip. +By default it will be the size of the current clip.

+
+
color
+

Background color of the final clip ([R,G,B]).

+
+
pos
+

Position of the clip in the final clip. ‘center’ is the default

+
+
col_opacity
+

Parameter in 0..1 indicating the opacity of the colored +background.

+
+
+
+
+ +
+
+save_frame(self, filename, t=0, withmask=True)
+

Save a clip’s frame to an image file.

+

Saves the frame of clip corresponding to time t in +‘filename’. t can be expressed in seconds (15.35), in +(min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If withmask is True the mask is saved in +the alpha layer of the picture (only works with PNGs).

+
+ +
+
+set_audio(self, audioclip)
+

Attach an AudioClip to the VideoClip.

+

Returns a copy of the VideoClip instance, with the audio +attribute set to audio, which must be an AudioClip instance.

+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute set to t, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. Also sets the duration of the mask and audio, if any, of the returned clip. If change_end is False, the start attribute of the clip will be modified to match the new duration and the preset end of the clip.

+
+ +
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like write_videofile, iter_frames, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Sets whether the clip is a mask or not (ismask is a boolean).

+
+ +
+
+set_make_frame(self, mf)
+

Change the clip’s get_frame.

+

Returns a copy of the VideoClip instance, with the make_frame +attribute set to mf.

+
+ +
+
+set_mask(self, mask)
+

Set the clip’s mask.

+

Returns a copy of the VideoClip with the mask attribute set to +mask, which must be a greyscale (values in 0-1) VideoClip

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory.

+
+ +
+
+set_opacity(self, op)
+

Set the opacity/transparency level of the clip.

+

Returns a semi-transparent copy of the clip where the mask is +multiplied by op (any float, normally between 0 and 1).

+
+ +
+
+set_pos(*a, **kw)
+

The function set_pos is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, set_position, instead.

+
+ +
+
+set_position(self, pos, relative=False)
+

Set the clip’s position in compositions.

+

Sets the position that the clip will have when included +in compositions. The argument pos can be either a couple +(x,y) or a function t-> (x,y). x and y mark the +location of the top left corner of the clip, and can be +of several types.

+

Examples

+
>>> clip.set_position((45,150)) # x=45, y=150
+>>>
+>>> # clip horizontally centered, at the top of the picture
+>>> clip.set_position(("center","top"))
+>>>
+>>> # clip is at 40% of the width, 70% of the height:
+>>> clip.set_position((0.4,0.7), relative=True)
+>>>
+>>> # clip's position is horizontally centered, and moving up !
+>>> clip.set_position(lambda t: ('center', 50+t) )
+
+
+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, the end attribute of the clip will be updated to start+duration.

+

If change_end=False and the clip has an end attribute, the duration attribute of the clip will be updated to end-start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip between times t_start and t_end, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t_end is not provided, it is assumed to be the duration of the clip (potentially infinite). If t_end is a negative value, it is reset to clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be subclips of the mask and audio of the original clip, if they exist.

+
+ +
+
+subfx(self, fx, ta=0, tb=None, **kwargs)
+

Apply a transformation to a part of the clip.

+

Returns a new clip in which the function fx (clip->clip) has been applied to the subclip between times ta and tb (in seconds).

+

Examples

+
>>> # The scene between times t=3s and t=6s in ``clip`` will
+>>> # be played at half speed in ``newclip``
+>>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)
+
+
+
+ +
+
+to_ImageClip(self, t=0, with_mask=True, duration=None)
+

Returns an ImageClip made out of the clip’s frame at time t, +which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+
+ +
+
+to_RGB(self)
+

Return a non-mask video clip made from the mask video clip.

+
+ +
+
+to_gif(*a, **kw)
+

The function to_gif is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_gif, instead.

+
+ +
+
+to_images_sequence(*a, **kw)
+

The function to_images_sequence is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_images_sequence, instead.

+
+ +
+
+to_mask(self, canal=0)
+

Return a mask video clip made from the clip.

+
+ +
+
+to_videofile(*a, **kw)
+

The function to_videofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_videofile, instead.

+
+ +
+
+without_audio(self)
+

Remove the clip’s audio.

+

Return a copy of the clip with audio set to None.

+
+ +
+
+write_gif(self, filename, fps=None, program='imageio', opt='nq', fuzz=1, verbose=True, loop=0, dispose=False, colors=None, tempfiles=False, logger='bar')
+

Write the VideoClip to a GIF file.

+

Converts a VideoClip into an animated GIF using ImageMagick +or ffmpeg.

Parameters:
+
filename
+

Name of the resulting gif file.

+
+
fps
+

Number of frames per second (see note below). If it +isn’t provided, then the function will look for the clip’s +fps attribute (VideoFileClip, for instance, have one).

+
+
program
+

Software to use for the conversion, either ‘imageio’ (this will use +the library FreeImage through ImageIO), or ‘ImageMagick’, or ‘ffmpeg’.

+
+
opt
+

Optimization to apply. If program=’imageio’, opt must be either ‘wu’ (Wu) or ‘nq’ (Neuquant). If program=’ImageMagick’, either ‘optimizeplus’ or ‘OptimizeTransparency’.

+
+
fuzz
+

(ImageMagick only) Compresses the GIF by considering that +the colors that are less than fuzz% different are in fact +the same.

+
+
tempfiles
+

Writes every frame to a file instead of passing them in the RAM. Useful on computers with little RAM. Can only be used with ‘ImageMagick’ or ‘ffmpeg’.

+
+
progress_bar
+

If True, displays a progress bar

+
+
+
+

Notes

+

The gif will play the clip in real time (you can only change the frame rate). If you want the gif to play slower than the clip, you can use, for instance:

+
>>> # slow down clip 50% and make it a gif
+>>> myClip.speedx(0.5).write_gif('myClip.gif')
+
+
+
+ +
+
+write_images_sequence(self, nameformat, fps=None, verbose=True, withmask=True, logger='bar')
+

Writes the videoclip to a sequence of image files.

Parameters:
+
nameformat
+

A filename specifying the numbering format and extension of the pictures. For instance “frame%03d.png” for filenames indexed with 3 digits and PNG format. Also possible: “some_folder/frame%04d.jpeg”, etc.

+
+
fps
+

Number of frames per second to consider when writing the +clip. If not specified, the clip’s fps attribute will +be used if it has one.

+
+
withmask
+

Will save the clip’s mask (if any) as an alpha channel (PNGs only).

+
+
verbose
+

Boolean indicating whether to print information.

+
+
logger
+

Either ‘bar’ (progress bar) or None or any Proglog logger.

+
+
+
Returns:
+
names_list
+

A list of all the files generated.

+
+
+
+

Notes

+

The resulting image sequence can be read using e.g. the class +ImageSequenceClip.

+
+ +
+
+write_videofile(self, filename, fps=None, codec=None, bitrate=None, audio=True, audio_fps=44100, preset='medium', audio_nbytes=4, audio_codec=None, audio_bitrate=None, audio_bufsize=2000, temp_audiofile=None, rewrite_audio=True, remove_temp=True, write_logfile=False, verbose=True, threads=None, ffmpeg_params=None, logger='bar')
+

Write the clip to a videofile.

Parameters:
+
filename
+

Name of the video file to write in. +The extension must correspond to the “codec” used (see below), +or simply be ‘.avi’ (which will work with any codec).

+
+
fps
+

Number of frames per second in the resulting video file. If None is +provided, and the clip has an fps attribute, this fps will be used.

+
+
codec
+

Codec to use for image encoding. Can be any codec supported by ffmpeg. If the filename has extension ‘.mp4’, ‘.ogv’ or ‘.webm’, the codec will be set accordingly, but you can still set it if you don’t like the default. For other extensions, the output filename must be set accordingly.

+

Some examples of codecs are:

+

'libx264' (default codec for file extension .mp4) +makes well-compressed videos (quality tunable using ‘bitrate’).

+

'mpeg4' (other codec for extension .mp4) can be an alternative +to 'libx264', and produces higher quality videos by default.

+

'rawvideo' (use file extension .avi) will produce +a video of perfect quality, of possibly very huge size.

+

png (use file extension .avi) will produce a video +of perfect quality, of smaller size than with rawvideo.

+

'libvorbis' (use file extension .ogv) is a nice video +format, which is completely free/ open source. However not +everyone has the codecs installed by default on their machine.

+

'libvpx' (use file extension .webm) is a tiny video format well suited for web videos (with HTML5). Open source.

+
+
audio
+

Either True, False, or a file name. +If True and the clip has an audio clip attached, this +audio clip will be incorporated as a soundtrack in the movie. +If audio is the name of an audio file, this audio file +will be incorporated as a soundtrack in the movie.

+
+
audiofps
+

frame rate to use when generating the sound.

+
+
temp_audiofile
+

The name of the temporary audiofile to be generated and incorporated in the movie, if any.

+
+
audio_codec
+

Which audio codec should be used. Examples are ‘libmp3lame’ for ‘.mp3’, ‘libvorbis’ for ‘.ogg’, ‘libfdk_aac’ for ‘.m4a’, ‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav. Default is ‘libmp3lame’, unless the video extension is ‘ogv’ or ‘webm’, in which case the default is ‘libvorbis’.

+
+
audio_bitrate
+

Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. Will determine the size/quality of audio in the output file. Note that it is mainly an indicative goal; the bitrate won’t necessarily be this value in the final file.

+
+
preset
+

Sets the time that FFMPEG will spend optimizing the compression. +Choices are: ultrafast, superfast, veryfast, faster, fast, medium, +slow, slower, veryslow, placebo. Note that this does not impact +the quality of the video, only the size of the video file. So +choose ultrafast when you are in a hurry and file size does not +matter.

+
+
threads
+

Number of threads to use for ffmpeg. Can speed up the writing of +the video on multicore computers.

+
+
ffmpeg_params
+

Any additional ffmpeg parameters you would like to pass, as a list +of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’].

+
+
write_logfile
+

If true, will write log files for the audio and the video. +These will be files ending with ‘.log’ with the name of the +output file in them.

+
+
logger
+

Either “bar” for progress bar or None or any Proglog logger.

+
+
verbose (deprecated, kept for compatibility)
+

Formerly used for toggling messages on/off. Use logger=None now.

+
+
+
+

Examples

+
>>> from moviepy.editor import VideoFileClip
+>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
+>>> clip.write_videofile("my_new_video.mp4")
+>>> clip.close()
+
+
+
+ +
+ +
+
+

TextClip

+
+
+class moviepy.video.VideoClip.TextClip(txt=None, filename=None, size=None, color='black', bg_color='transparent', fontsize=None, font='Courier', stroke_color=None, stroke_width=1, method='label', kerning=None, align='center', interline=None, tempfilename=None, temptxt=None, transparent=True, remove_temp=True, print_cmd=False)[source]
+

Bases: moviepy.video.VideoClip.ImageClip

+

Class for autogenerated text clips.

+

Creates an ImageClip originating from a script-generated text image. +Requires ImageMagick.

Parameters:
+
txt
+

A string of the text to write. Can be replaced by argument +filename.

+
+
filename
+

The name of a file in which there is the text to write. +Can be provided instead of argument txt

+
+
size
+

Size of the picture in pixels. Can be auto-set if method=’label’, but mandatory if method=’caption’. The height can be None; it will then be auto-determined.

+
+
bg_color
+

Color of the background. See TextClip.list('color') +for a list of acceptable names.

+
+
color
+

Color of the text. See TextClip.list('color') for a +list of acceptable names.

+
+
font
+

Name of the font to use. See TextClip.list('font') for +the list of fonts you can use on your computer.

+
+
stroke_color
+

Color of the stroke (=contour line) of the text. If None, +there will be no stroke.

+
+
stroke_width
+

Width of the stroke, in pixels. Can be a float, like 1.5.

+
+
method
+

Either ‘label’ (default, the picture will be autosized so as to fit +exactly the size) or ‘caption’ (the text will be drawn in a picture +with fixed size provided with the size argument). If caption, +the text will be wrapped automagically (sometimes it is buggy, not +my fault, complain to the ImageMagick crew) and can be aligned or +centered (see parameter align).

+
+
kerning
+

Changes the default spacing between letters. For instance kerning=-1 will make the letters 1 pixel closer to each other compared to the default spacing.

+
+
align
+

center | East | West | South | North . Will only work if method +is set to caption

+
+
transparent
+

True (default) if you want to take into account the +transparency in the image.

+
+
+
+
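A minimal sketch using the default ‘Courier’ font (check TextClip.list('font') for the fonts actually available on your machine):
>>> txt = TextClip("Hello!", fontsize=70, color='white').set_duration(3)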
+
+add_mask(self)
+

Add a mask VideoClip to the VideoClip.

+

Returns a copy of the clip with a completely opaque mask (made of ones). This makes computations slower compared to having a None mask, but can be useful in many cases.

+

Set constant_size to False for clips with moving +image size.

+
+ +
+
+afx(self, fun, *a, **k)
+

Transform the clip’s audio.

+

Return a new clip whose audio has been transformed by fun.

+
+ +
+
+blit_on(self, picture, t)
+

Returns the result of the blit of the clip’s frame at time t +on the given picture, the position of the clip being given +by the clip’s pos attribute. Meant for compositing.

+
+ +
+
+close(self)
+

Release any resources that are in use.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but skipping the extract between ta and tb, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If the original clip has a duration attribute set, the duration of the returned clip is automatically computed as duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
+
+fl(self, fl, apply_to=None, keep_duration=True)
+

General transformation filter.

+

Equivalent to VideoClip.fl. The result is no longer an ImageClip; it has the class VideoClip (since it may be animated).

+
+ +
+
+fl_image(self, image_func, apply_to=None)
+

Image-transformation filter.

+

Does the same as VideoClip.fl_image, but for ImageClip the transformed clip is computed once and for all at the beginning, and not for each ‘frame’.

+
+ +
+
+fl_time(self, time_func, apply_to=None, keep_duration=False)
+

Time-transformation filter.

+

Applies a transformation to the clip’s timeline +(see Clip.fl_time).

+

This method does nothing for ImageClips (but it may affect their +masks or their audios). The result is still an ImageClip.

+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs). For instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> mirrorx( resize( volumex( clip, 0.5 ), 0.3 ) )
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t, or the (mono or stereo) value for a sound clip.

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns true if t is between the start and the end of the clip. t can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t is a numpy array, returns False if none of the times in t is in the clip, else returns a vector [b_1, b_2, b_3…] where b_i is true iff t_i is in the clip.

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as a HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+static list(arg)[source]
+

Returns the list of all valid entries for the argument of +TextClip given (can be font, color, etc…)

+
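For instance:
>>> print( TextClip.list('font') )   # fonts known to ImageMagick
>>> print( TextClip.list('color') )  # color names accepted by color/bg_color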
+ +
+
+on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None)
+

Place the clip on a colored background.

+

Returns a clip made of the current clip overlaid on a color +clip of a possibly bigger size. Can serve to flatten transparent +clips.

Parameters:
+
size
+

Size (width, height) in pixels of the final clip. +By default it will be the size of the current clip.

+
+
color
+

Background color of the final clip ([R,G,B]).

+
+
pos
+

Position of the clip in the final clip. ‘center’ is the default

+
+
col_opacity
+

Parameter in 0..1 indicating the opacity of the colored +background.

+
+
+
+
+ +
+
+save_frame(self, filename, t=0, withmask=True)
+

Save a clip’s frame to an image file.

+

Saves the frame of clip corresponding to time t in +‘filename’. t can be expressed in seconds (15.35), in +(min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If withmask is True the mask is saved in +the alpha layer of the picture (only works with PNGs).

+
+ +
+
+static search(string, arg)[source]
+

Returns the list of all valid entries which contain string for the argument arg of TextClip, for instance

+
>>> # Find all the available fonts which contain "Courier"
+>>> print ( TextClip.search('Courier', 'font') )
+
+
+
+ +
+
+set_audio(self, audioclip)
+

Attach an AudioClip to the VideoClip.

+

Returns a copy of the VideoClip instance, with the audio +attribute set to audio, which must be an AudioClip instance.

+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute set to t, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. Also sets the duration of the mask and audio, if any, of the returned clip. If change_end is False, the start attribute of the clip will be modified to match the new duration and the preset end of the clip.

+
+ +
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like write_videofile, iter_frames, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Sets whether the clip is a mask or not (ismask is a boolean).

+
+ +
+
+set_make_frame(self, mf)
+

Change the clip’s get_frame.

+

Returns a copy of the VideoClip instance, with the make_frame +attribute set to mf.

+
+ +
+
+set_mask(self, mask)
+

Set the clip’s mask.

+

Returns a copy of the VideoClip with the mask attribute set to +mask, which must be a greyscale (values in 0-1) VideoClip

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory.

+
+ +
+
+set_opacity(self, op)
+

Set the opacity/transparency level of the clip.

+

Returns a semi-transparent copy of the clip where the mask is +multiplied by op (any float, normally between 0 and 1).

+
+ +
+
+set_pos(*a, **kw)
+

The function set_pos is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, set_position, instead.

+
+ +
+
+set_position(self, pos, relative=False)
+

Set the clip’s position in compositions.

+

Sets the position that the clip will have when included +in compositions. The argument pos can be either a couple +(x,y) or a function t-> (x,y). x and y mark the +location of the top left corner of the clip, and can be +of several types.

+

Examples

+
>>> clip.set_position((45,150)) # x=45, y=150
+>>>
+>>> # clip horizontally centered, at the top of the picture
+>>> clip.set_position(("center","top"))
+>>>
+>>> # clip is at 40% of the width, 70% of the height:
+>>> clip.set_position((0.4,0.7), relative=True)
+>>>
+>>> # clip's position is horizontally centered, and moving up !
+>>> clip.set_position(lambda t: ('center', 50+t) )
+
+
+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, the end attribute of the clip will be updated to start+duration.

+

If change_end=False and the clip has an end attribute, the duration attribute of the clip will be updated to end-start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip between times t_start and t_end, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t_end is not provided, it is assumed to be the duration of the clip (potentially infinite). If t_end is a negative value, it is reset to clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be subclips of the mask and audio of the original clip, if they exist.

+
+ +
+
+subfx(self, fx, ta=0, tb=None, **kwargs)
+

Apply a transformation to a part of the clip.

+

Returns a new clip in which the function fx (clip->clip) has been applied to the subclip between times ta and tb (in seconds).

+

Examples

+
>>> # The scene between times t=3s and t=6s in ``clip`` will
+>>> # be played at half speed in ``newclip``
+>>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)
+
+
+
+ +
+
+to_ImageClip(self, t=0, with_mask=True, duration=None)
+

Returns an ImageClip made out of the clip’s frame at time t, +which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+
+ +
+
+to_RGB(self)
+

Return a non-mask video clip made from the mask video clip.

+
+ +
+
+to_gif(*a, **kw)
+

The function to_gif is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_gif, instead.

+
+ +
+
+to_images_sequence(*a, **kw)
+

The function to_images_sequence is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_images_sequence, instead.

+
+ +
+
+to_mask(self, canal=0)
+

Return a mask video clip made from the clip.

+
+ +
+
+to_videofile(*a, **kw)
+

The function to_videofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_videofile, instead.

+
+ +
+
+without_audio(self)
+

Remove the clip’s audio.

+

Return a copy of the clip with audio set to None.

+
+ +
+
+write_gif(self, filename, fps=None, program='imageio', opt='nq', fuzz=1, verbose=True, loop=0, dispose=False, colors=None, tempfiles=False, logger='bar')
+

Write the VideoClip to a GIF file.

+

Converts a VideoClip into an animated GIF using ImageMagick +or ffmpeg.

Parameters:
+
filename
+

Name of the resulting gif file.

+
+
fps
+

Number of frames per second (see note below). If it +isn’t provided, then the function will look for the clip’s +fps attribute (VideoFileClip, for instance, have one).

+
+
program
+

Software to use for the conversion, either ‘imageio’ (this will use +the library FreeImage through ImageIO), or ‘ImageMagick’, or ‘ffmpeg’.

+
+
opt
+

Optimization to apply. If program=’imageio’, opt must be either ‘wu’ (Wu) or ‘nq’ (Neuquant). If program=’ImageMagick’, either ‘optimizeplus’ or ‘OptimizeTransparency’.

+
+
fuzz
+

(ImageMagick only) Compresses the GIF by considering that +the colors that are less than fuzz% different are in fact +the same.

+
+
tempfiles
+

Writes every frame to a file instead of passing them in the RAM. Useful on computers with little RAM. Can only be used with ‘ImageMagick’ or ‘ffmpeg’.

+
+
progress_bar
+

If True, displays a progress bar

+
+
+
+

Notes

+

The gif will play the clip in real time (you can only change the frame rate). If you want the gif to play slower than the clip, you can use, for instance:

+
>>> # slow down clip 50% and make it a gif
+>>> myClip.speedx(0.5).write_gif('myClip.gif')
+
+
+
+ +
+
+write_images_sequence(self, nameformat, fps=None, verbose=True, withmask=True, logger='bar')
+

Writes the videoclip to a sequence of image files.

Parameters:
+
nameformat
+

A filename specifying the numbering format and extension of the pictures. For instance “frame%03d.png” for filenames indexed with 3 digits and PNG format. Also possible: “some_folder/frame%04d.jpeg”, etc.

+
+
fps
+

Number of frames per second to consider when writing the +clip. If not specified, the clip’s fps attribute will +be used if it has one.

+
+
withmask
+

Will save the clip’s mask (if any) as an alpha channel (PNGs only).

+
+
verbose
+

Boolean indicating whether to print information.

+
+
logger
+

Either ‘bar’ (progress bar) or None or any Proglog logger.

+
+
+
Returns:
+
names_list
+

A list of all the files generated.

+
+
+
+

Notes

+

The resulting image sequence can be read using e.g. the class +ImageSequenceClip.

+
+ +
+
+write_videofile(self, filename, fps=None, codec=None, bitrate=None, audio=True, audio_fps=44100, preset='medium', audio_nbytes=4, audio_codec=None, audio_bitrate=None, audio_bufsize=2000, temp_audiofile=None, rewrite_audio=True, remove_temp=True, write_logfile=False, verbose=True, threads=None, ffmpeg_params=None, logger='bar')
+

Write the clip to a videofile.

Parameters:
+
filename
+

Name of the video file to write in. +The extension must correspond to the “codec” used (see below), +or simply be ‘.avi’ (which will work with any codec).

+
+
fps
+

Number of frames per second in the resulting video file. If None is +provided, and the clip has an fps attribute, this fps will be used.

+
+
codec
+

Codec to use for image encoding. Can be any codec supported by ffmpeg. If the filename has extension ‘.mp4’, ‘.ogv’ or ‘.webm’, the codec will be set accordingly, but you can still set it if you don’t like the default. For other extensions, the output filename must be set accordingly.

+

Some examples of codecs are:

+

'libx264' (default codec for file extension .mp4) +makes well-compressed videos (quality tunable using ‘bitrate’).

+

'mpeg4' (other codec for extension .mp4) can be an alternative +to 'libx264', and produces higher quality videos by default.

+

'rawvideo' (use file extension .avi) will produce +a video of perfect quality, of possibly very huge size.

+

png (use file extension .avi) will produce a video +of perfect quality, of smaller size than with rawvideo.

+

'libvorbis' (use file extension .ogv) is a nice video +format, which is completely free/ open source. However not +everyone has the codecs installed by default on their machine.

+

'libvpx' (use file extension .webm) is a tiny video format well suited for web videos (with HTML5). Open source.

+
+
audio
+

Either True, False, or a file name. +If True and the clip has an audio clip attached, this +audio clip will be incorporated as a soundtrack in the movie. +If audio is the name of an audio file, this audio file +will be incorporated as a soundtrack in the movie.

+
+
audiofps
+

frame rate to use when generating the sound.

+
+
temp_audiofile
+

The name of the temporary audiofile to be generated and incorporated in the movie, if any.

+
+
audio_codec
+

Which audio codec should be used. Examples are ‘libmp3lame’ for ‘.mp3’, ‘libvorbis’ for ‘.ogg’, ‘libfdk_aac’ for ‘.m4a’, ‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav. Default is ‘libmp3lame’, unless the video extension is ‘ogv’ or ‘webm’, in which case the default is ‘libvorbis’.

+
+
audio_bitrate
+

Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. Will determine the size/quality of audio in the output file. Note that it is mainly an indicative goal; the bitrate won’t necessarily be this value in the final file.

+
+
preset
+

Sets the time that FFMPEG will spend optimizing the compression. +Choices are: ultrafast, superfast, veryfast, faster, fast, medium, +slow, slower, veryslow, placebo. Note that this does not impact +the quality of the video, only the size of the video file. So +choose ultrafast when you are in a hurry and file size does not +matter.

+
+
threads
+

Number of threads to use for ffmpeg. Can speed up the writing of +the video on multicore computers.

+
+
ffmpeg_params
+

Any additional ffmpeg parameters you would like to pass, as a list +of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’].

+
+
write_logfile
+

If true, will write log files for the audio and the video. +These will be files ending with ‘.log’ with the name of the +output file in them.

+
+
logger
+

Either “bar” for progress bar or None or any Proglog logger.

+
+
verbose (deprecated, kept for compatibility)
+

Formerly used for toggling messages on/off. Use logger=None now.

+
+
+
+

Examples

+
>>> from moviepy.editor import VideoFileClip
+>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
+>>> clip.write_videofile("my_new_video.mp4")
+>>> clip.close()
+
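A sketch of a more explicit call; all parameter values shown are illustrative choices, not MoviePy defaults:

>>> clip.write_videofile(
...     "my_new_video.mp4",
...     fps=24,
...     codec='libx264',        # explicit, though implied by the .mp4 extension
...     bitrate='2000k',
...     audio_codec='aac',      # any audio codec supported by ffmpeg works here
...     preset='slower',        # smaller file, longer encode
...     threads=4,              # use several cores
... )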
+
+
+ +
+ +
+
+

CompositeVideoClip

+
+
+class moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip(clips, size=None, bg_color=None, use_bgclip=False, ismask=False)[source]
+

Bases: moviepy.video.VideoClip.VideoClip

+

A VideoClip made of other videoclips displayed together. This is the +base class for most compositions.

Parameters:

size
The size (width, height) of the final clip.

clips
A list of videoclips. Each clip of the list will be displayed below the clips appearing after it in the list. For each clip:

  • The attribute pos determines where the clip is placed. See VideoClip.set_pos.
  • The mask of the clip determines which parts are visible.

Finally, if all the clips in the list have their duration attribute set, then the duration of the composite video clip is computed automatically.

bg_color
Color for the unmasked and unfilled regions. Set to None for these regions to be transparent (will be slower).

use_bgclip
Set to True if the first clip in the list should be used as the 'background' on which all other clips are blitted. That first clip must have the same size as the final clip. If it has no transparency, the final clip will have no mask.

The FPS of the composite clip is the highest FPS of the clips in the list.
+
+
+
+
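A minimal composition sketch (the file name and text are placeholders; TextClip additionally requires ImageMagick):

>>> from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
>>> background = VideoFileClip("myvideo.mp4")
>>> title = (TextClip("My Holidays", fontsize=70, color='white')
...          .set_position(('center', 'top'))
...          .set_duration(5))
>>> final = CompositeVideoClip([background, title])  # title drawn over background
>>> final.write_videofile("with_title.mp4")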
+
+add_mask(self)
+

Add a mask VideoClip to the VideoClip.

+

Returns a copy of the clip with a completely opaque mask (made of ones). This makes computations slower compared to having a None mask but can be useful in many cases.

Set constant_size to False for clips with moving image size.

+
+ +
+
+afx(self, fun, *a, **k)
+

Transform the clip’s audio.

+

Return a new clip whose audio has been transformed by fun.

+
+ +
+
+blit_on(self, picture, t)
+

Returns the result of the blit of the clip’s frame at time t +on the given picture, the position of the clip being given +by the clip’s pos attribute. Meant for compositing.

+
+ +
+
+close(self)[source]
+

Release any resources that are in use.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but skips the extract between ta and tb, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: '01:03:05.35'. If the original clip has a duration attribute set, the duration of the returned clip is automatically computed as duration - (tb - ta).

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
+
+fl(self, fun, apply_to=None, keep_duration=True)
+

General processing of a clip.

+

Returns a new Clip whose frames are a transformation +(through function fun) of the frames of the current clip.

+ +++ + + + +
Parameters:
+
fun
+

A function with signature (gf,t -> frame) where gf will +represent the current clip’s get_frame method, +i.e. gf is a function (t->image). Parameter t is a time +in seconds, frame is a picture (=Numpy array) which will be +returned by the transformed clip (see examples below).

+
+
apply_to
+

Can be either 'mask', or 'audio', or +['mask','audio']. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration
+

Set to True if the transformation does not change the +duration of the clip.

+
+
+
+

Examples

+

In the following, newclip is a 50-pixels-high clip whose video content scrolls from the top to the bottom of the frames of clip.

+
>>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :]
+>>> newclip = clip.fl(fl, apply_to='mask')
+
+
+
+ +
+
+fl_image(self, image_func, apply_to=None)
+

Modifies the images of a clip by replacing the frame +get_frame(t) by another frame, image_func(get_frame(t))

+
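For instance, a quick sketch that mirrors every frame left-right (any frame-wise NumPy transformation works the same way):

>>> newclip = clip.fl_image(lambda frame: frame[:, ::-1])  # flip columns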
+ +
+
+fl_time(self, t_func, apply_to=None, keep_duration=False)
+

Returns a Clip instance playing the content of the current clip +but with a modified timeline, time t being replaced by another +time t_func(t).

+ +++ + + + +
Parameters:
+
t_func:
+

A function t-> new_t

+
+
apply_to:
+

Can be either ‘mask’, or ‘audio’, or [‘mask’,’audio’]. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration:
+

False (default) if the transformation modifies the +duration of the clip.

+
+
+
+

Examples

+
>>> # plays the clip (and its mask and sound) twice as fast
+>>> newclip = clip.fl_time(lambda t: 2*t, apply_to=['mask', 'audio'])
+>>>
+>>> # plays the clip starting at t=3, and backwards:
+>>> newclip = clip.fl_time(lambda t: 3-t)
+
+
+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs). For instance:

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> resize( volumex( mirrorx( clip ), 0.5), 0.3)
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t +or (mono or stereo) value for a sound clip

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns True if t is between the start and the end of the clip. t can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: '01:03:05.35'. If t is a numpy array, returns False if none of the values of t falls inside the clip; otherwise returns a vector [b_1, b_2, b_3...] where b_i is True iff t_i is in the clip.

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as a HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print([frame[0,:,0].max()
+...        for frame in myclip.iter_frames()])
+
+
+
+ +
+
+on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None)
+

Place the clip on a colored background.

+

Returns a clip made of the current clip overlaid on a color +clip of a possibly bigger size. Can serve to flatten transparent +clips.

+ +++ + + + +
Parameters:
+
size
+

Size (width, height) in pixels of the final clip. +By default it will be the size of the current clip.

+
+
color
+

Background color of the final clip ([R,G,B]).

+
+
pos
+

Position of the clip in the final clip. 'center' is the default.

+
+
col_opacity
+

Parameter in 0..1 indicating the opacity of the colored +background.

+
+
+
+
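For example, a sketch that flattens a (possibly transparent) clip onto a larger white canvas; the size shown is illustrative:

>>> flat = clip.on_color(size=(1280, 720), color=(255, 255, 255), pos='center')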
+ +
+
+playing_clips(self, t=0)[source]
+

Returns a list of the clips in the composite clip that are actually playing at the given time t.

+
+ +
+
+save_frame(self, filename, t=0, withmask=True)
+

Save a clip’s frame to an image file.

+

Saves the frame of clip corresponding to time t in +‘filename’. t can be expressed in seconds (15.35), in +(min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If withmask is True the mask is saved in +the alpha layer of the picture (only works with PNGs).

+
+ +
+
+set_audio(self, audioclip)
+

Attach an AudioClip to the VideoClip.

+

Returns a copy of the VideoClip instance, with the audio +attribute set to audio, which must be an AudioClip instance.

+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute set to t, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: '01:03:05.35'. Also sets the duration of the mask and audio, if any, of the returned clip. If change_end is False, the start attribute of the clip is modified instead, so that the clip keeps its preset end (start = end - t).

+
+ +
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like write_videofile, iter_frames, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Sets whether the clip is a mask or not (ismask is a boolean).

+
+ +
+
+set_make_frame(self, mf)
+

Change the clip’s get_frame.

+

Returns a copy of the VideoClip instance, with the make_frame +attribute set to mf.

+
+ +
+
+set_mask(self, mask)
+

Set the clip’s mask.

+

Returns a copy of the VideoClip with the mask attribute set to +mask, which must be a greyscale (values in 0-1) VideoClip

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory.

+
+ +
+
+set_opacity(self, op)
+

Set the opacity/transparency level of the clip.

+

Returns a semi-transparent copy of the clip where the mask is +multiplied by op (any float, normally between 0 and 1).

+
+ +
+
+set_pos(*a, **kw)
+

The function set_pos is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, set_position, instead.

+
+ +
+
+set_position(self, pos, relative=False)
+

Set the clip’s position in compositions.

+

Sets the position that the clip will have when included +in compositions. The argument pos can be either a couple +(x,y) or a function t-> (x,y). x and y mark the +location of the top left corner of the clip, and can be +of several types.

+

Examples

+
>>> clip.set_position((45,150)) # x=45, y=150
+>>>
+>>> # clip horizontally centered, at the top of the picture
+>>> clip.set_position(("center","top"))
+>>>
+>>> # clip is at 40% of the width, 70% of the height:
+>>> clip.set_position((0.4,0.7), relative=True)
+>>>
+>>> # clip's position is horizontally centered, and moving down:
+>>> clip.set_position(lambda t: ('center', 50+t) )
+
+
+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, the end attribute of the clip will be updated to start + duration.

+

If change_end=False and the clip has an end attribute, the duration attribute of the clip will be updated to end - start.

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip between times t_start and t_end, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: '01:03:05.35'. If t_end is not provided, it is assumed to be the duration of the clip (potentially infinite). If t_end is a negative value, it is reset to clip.duration + t_end. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be +subclips of mask and audio the original clip, if +they exist.

+
+ +
+
+subfx(self, fx, ta=0, tb=None, **kwargs)
+

Apply a transformation to a part of the clip.

+

Returns a new clip in which the function fun (clip->clip) +has been applied to the subclip between times ta and tb +(in seconds).

+

Examples

+
>>> # The scene between times t=3s and t=6s in ``clip`` will be
+>>> # played twice slower in ``newclip``
+>>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)
+
+
+
+ +
+
+to_ImageClip(self, t=0, with_mask=True, duration=None)
+

Returns an ImageClip made out of the clip’s frame at time t, +which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+
+ +
+
+to_RGB(self)
+

Return a non-mask video clip made from the mask video clip.

+
+ +
+
+to_gif(*a, **kw)
+

The function to_gif is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_gif, instead.

+
+ +
+
+to_images_sequence(*a, **kw)
+

The function to_images_sequence is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_images_sequence, instead.

+
+ +
+
+to_mask(self, canal=0)
+

Return a mask video clip made from the clip.

+
+ +
+
+to_videofile(*a, **kw)
+

The function to_videofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_videofile, instead.

+
+ +
+
+without_audio(self)
+

Remove the clip’s audio.

+

Return a copy of the clip with audio set to None.

+
+ +
+
+write_gif(self, filename, fps=None, program='imageio', opt='nq', fuzz=1, verbose=True, loop=0, dispose=False, colors=None, tempfiles=False, logger='bar')
+

Write the VideoClip to a GIF file.

+

Converts a VideoClip into an animated GIF using ImageMagick +or ffmpeg.

+ +++ + + + +
Parameters:
+
filename
+

Name of the resulting gif file.

+
+
fps
+

Number of frames per second (see note below). If it isn't provided, then the function will look for the clip's fps attribute (VideoFileClip, for instance, has one).

+
+
program
+

Software to use for the conversion, either ‘imageio’ (this will use +the library FreeImage through ImageIO), or ‘ImageMagick’, or ‘ffmpeg’.

+
+
opt
+

Optimization to apply. If program='imageio', opt must be either 'wu' (Wu) or 'nq' (NeuQuant). If program='ImageMagick', either 'optimizeplus' or 'OptimizeTransparency'.

+
+
fuzz
+

(ImageMagick only) Compresses the GIF by considering that +the colors that are less than fuzz% different are in fact +the same.

+
+
tempfiles
+

Writes every frame to a file instead of holding them all in RAM. Useful on computers with little RAM. Can only be used with 'ImageMagick' or 'ffmpeg'.

+
+
progress_bar
+

If True, displays a progress bar

+
+
+
+

Notes

+

The gif will be playing the clip in real time (you can only change the frame rate). If you want the gif to be played slower than the clip, you can use:

+
>>> # slow down clip 50% and make it a gif
+>>> myClip.speedx(0.5).write_gif('myClip.gif')
+
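A fuller sketch, assuming a myvideo.mp4 file exists; the fps and opt values are illustrative:

>>> from moviepy.editor import VideoFileClip
>>> clip = VideoFileClip("myvideo.mp4").subclip(0, 3).resize(0.3)
>>> clip.write_gif("short.gif", fps=10, opt='nq')  # imageio backend by default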
+
+
+ +
+
+write_images_sequence(self, nameformat, fps=None, verbose=True, withmask=True, logger='bar')
+

Writes the videoclip to a sequence of image files.

+ +++ + + + + + +
Parameters:
+
nameformat
+

A filename specifying the numbering format and extension of the pictures. For instance "frame%03d.png" for filenames indexed with 3 digits and PNG format. Also possible: "some_folder/frame%04d.jpeg", etc.

+
+
fps
+

Number of frames per second to consider when writing the +clip. If not specified, the clip’s fps attribute will +be used if it has one.

+
+
withmask
+

Will save the clip's mask (if any) as an alpha channel (PNGs only).

+
+
verbose
+

Boolean indicating whether to print information.

+
+
logger
+

Either ‘bar’ (progress bar) or None or any Proglog logger.

+
+
+
Returns:
+
names_list
+

A list of all the files generated.

+
+
+
+

Notes

+

The resulting image sequence can be read using e.g. the class +ImageSequenceClip.
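For instance, a minimal sketch (the frames/ folder is assumed to exist already):

>>> names_list = myclip.write_images_sequence("frames/frame%04d.png", fps=24)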

+
+ +
+
+write_videofile(self, filename, fps=None, codec=None, bitrate=None, audio=True, audio_fps=44100, preset='medium', audio_nbytes=4, audio_codec=None, audio_bitrate=None, audio_bufsize=2000, temp_audiofile=None, rewrite_audio=True, remove_temp=True, write_logfile=False, verbose=True, threads=None, ffmpeg_params=None, logger='bar')
+

Write the clip to a videofile.

Parameters:

filename
Name of the video file to write to. The extension must correspond to the "codec" used (see below), or simply be '.avi' (which will work with any codec).

fps
Number of frames per second in the resulting video file. If None is provided, and the clip has an fps attribute, this fps will be used.

codec
Codec to use for image encoding. Can be any codec supported by ffmpeg. If the filename has extension '.mp4', '.ogv' or '.webm', the codec will be set accordingly, but you can still set it if you don't like the default. For other extensions, the output filename must be set accordingly.

Some examples of codecs are:

'libx264' (default codec for file extension .mp4) makes well-compressed videos (quality tunable using 'bitrate').

'mpeg4' (other codec for extension .mp4) can be an alternative to 'libx264', and produces higher quality videos by default.

'rawvideo' (use file extension .avi) will produce a video of perfect quality, of possibly very huge size.

'png' (use file extension .avi) will produce a video of perfect quality, of smaller size than with rawvideo.

'libvorbis' (use file extension .ogv) is a nice video format, which is completely free/open source. However not everyone has the codecs installed by default on their machine.

'libvpx' (use file extension .webm) is a tiny video format well suited for web videos (with HTML5). Open source.

audio
Either True, False, or a file name. If True and the clip has an audio clip attached, this audio clip will be incorporated as a soundtrack in the movie. If audio is the name of an audio file, this audio file will be incorporated as a soundtrack in the movie.

audio_fps
Frame rate to use when generating the sound.

temp_audiofile
The name of the temporary audiofile to be generated and incorporated in the movie, if any.

audio_codec
Which audio codec should be used. Examples are 'libmp3lame' for '.mp3', 'libvorbis' for '.ogg', 'libfdk_aac' for '.m4a', 'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav. Default is 'libmp3lame', unless the video extension is 'ogv' or 'webm', in which case the default is 'libvorbis'.

audio_bitrate
Audio bitrate, given as a string like '50k', '500k', '3000k'. Will determine the size and quality of audio in the output file. Note that it is mainly an indicative goal; the bitrate won't necessarily be this value in the final file.

preset
Sets the time that FFMPEG will spend optimizing the compression. Choices are: ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow, placebo. Note that this does not impact the quality of the video, only the size of the video file. So choose ultrafast when you are in a hurry and file size does not matter.

threads
Number of threads to use for ffmpeg. Can speed up the writing of the video on multicore computers.

ffmpeg_params
Any additional ffmpeg parameters you would like to pass, as a list of terms, like ['-option1', 'value1', '-option2', 'value2'].

write_logfile
If True, will write log files for the audio and the video. These will be files ending with '.log' with the name of the output file in them.

logger
Either "bar" for a progress bar, or None, or any Proglog logger.

verbose (deprecated, kept for compatibility)
Formerly used for toggling messages on/off. Use logger=None now.

+
+
+
+

Examples

+
>>> from moviepy.editor import VideoFileClip
+>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
+>>> clip.write_videofile("my_new_video.mp4")
+>>> clip.close()
+
+
+
+ +
+ +
diff --git a/v1.0.3/ref/audiofx.html b/v1.0.3/ref/audiofx.html
new file mode 100644
index 000000000..eb6bfa9ee
--- /dev/null
+++ b/v1.0.3/ref/audiofx.html
@@ -0,0 +1,287 @@

audio.fx

+

The module moviepy.audio.fx regroups functions meant to be used with audio.fx(). Note that some of these functions, such as volumex (which multiplies the volume), can be applied directly to a video clip, in which case they will affect the audio clip attached to this video clip. Read the docs of the different functions to know when this is the case.

+

Because this module will be larger in the future, it allows two kinds of import. +You can either import a single function like this:

+
from moviepy.audio.fx.volumex import volumex
+newaudio = audioclip.fx( volumex, 0.5)
+
+
+

Or import everything:

+
import moviepy.audio.fx.all as afx
+newaudio = (audioclip.fx( afx.audio_normalize)
+                     .fx( afx.volumex, 0.5)
+                     .fx( afx.audio_fadein, 1.0)
+                     .fx( afx.audio_fadeout, 1.0))
+
+
+

When you type

+
from moviepy.editor import *
+
+
+

the module audio.fx is loaded as afx and you can use afx.volumex, etc.

audio_fadein: Return an audio (or video) clip that is first mute, then the sound arrives progressively over duration seconds.
audio_fadeout: Return a sound clip where the sound fades out progressively over duration seconds at the end of the clip.
audio_loop: Loops over an audio clip.
audio_normalize: Return a clip whose volume is normalized to 0db.
volumex: Returns a clip with audio volume multiplied by the value factor.
+
diff --git a/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_fadein.html b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_fadein.html
new file mode 100644
index 000000000..c2cf4f59f
--- /dev/null
+++ b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_fadein.html
@@ -0,0 +1,250 @@

moviepy.audio.fx.all.audio_fadein

+
+
+moviepy.audio.fx.all.audio_fadein(clip, duration)[source]
+

Return an audio (or video) clip that is first mute, then the +sound arrives progressively over duration seconds.

+
+ +
diff --git a/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_fadeout.html b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_fadeout.html
new file mode 100644
index 000000000..7d2ef1048
--- /dev/null
+++ b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_fadeout.html
@@ -0,0 +1,250 @@

moviepy.audio.fx.all.audio_fadeout

+
+
+moviepy.audio.fx.all.audio_fadeout(clip, duration)[source]
+

Return a sound clip where the sound fades out progressively +over duration seconds at the end of the clip.

+
+ +
diff --git a/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_loop.html b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_loop.html
new file mode 100644
index 000000000..bf6ee3abf
--- /dev/null
+++ b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_loop.html
@@ -0,0 +1,259 @@

moviepy.audio.fx.all.audio_loop

+
+
+moviepy.audio.fx.all.audio_loop(audioclip, nloops=None, duration=None)[source]
+

Loops over an audio clip.

+

Returns an audio clip that plays the given clip either +nloops times, or during duration seconds.

+

Examples

+
>>> from moviepy.editor import *
+>>> videoclip = VideoFileClip('myvideo.mp4')
+>>> music = AudioFileClip('music.ogg')
+>>> audio = afx.audio_loop( music, duration=videoclip.duration)
+>>> videoclip = videoclip.set_audio(audio)
+
+
+
+ +
diff --git a/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_normalize.html b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_normalize.html
new file mode 100644
index 000000000..bd6efa424
--- /dev/null
+++ b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.audio_normalize.html
@@ -0,0 +1,256 @@

moviepy.audio.fx.all.audio_normalize

+
+
+moviepy.audio.fx.all.audio_normalize(clip)[source]
+

Return a clip whose volume is normalized to 0db.

+

Return an audio (or video) clip whose audio volume is normalized +so that the maximum volume is at 0db, the maximum achievable volume.

+

Examples

+
>>> from moviepy.editor import *
+>>> videoclip = VideoFileClip('myvideo.mp4').fx(afx.audio_normalize)
+
+
+
+ +
diff --git a/v1.0.3/ref/audiofx/moviepy.audio.fx.all.volumex.html b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.volumex.html
new file mode 100644
index 000000000..58c3e7bcd
--- /dev/null
+++ b/v1.0.3/ref/audiofx/moviepy.audio.fx.all.volumex.html
@@ -0,0 +1,258 @@

moviepy.audio.fx.all.volumex

+
+
+moviepy.audio.fx.all.volumex(clip, factor)[source]
+

Returns a clip with audio volume multiplied by the +value factor. Can be applied to both audio and video clips.

+

This effect is loaded as a clip method when you use moviepy.editor, +so you can just write clip.volumex(2)

+

Examples

+
>>> newclip = volumex(clip, 2.0) # doubles audio volume
+>>> newclip = clip.fx( volumex, 0.5) # half audio, use with fx
+>>> newclip = clip.volumex(2) # only if you used "moviepy.editor"
+
+
+
+ +
diff --git a/v1.0.3/ref/audiotools.html b/v1.0.3/ref/audiotools.html
new file mode 100644
index 000000000..92fcb5ab1
--- /dev/null
+++ b/v1.0.3/ref/audiotools.html
@@ -0,0 +1,235 @@

audio.tools

+

Currently empty

+
diff --git a/v1.0.3/ref/code_origanization.html b/v1.0.3/ref/code_origanization.html
new file mode 100644
index 000000000..1df34e0ad
--- /dev/null
+++ b/v1.0.3/ref/code_origanization.html
@@ -0,0 +1,249 @@

Organization of MoviePy’s code

+

This section reviews the folders and files in MoviePy's code. It's very simple:

+

At the root of the project you have everything required for the packaging and installation of moviepy (README, setup.py, LICENCE, etc.). Then you have the docs/ folder with the source code of the documentation, a folder for some example scripts, and the main folder moviepy/ for the source code of the library itself.

+

In the folder moviepy/, the classes and modules relating to the video and the audio are clearly separated into two subfolders video/ and audio/. In moviepy/ you will find all the classes, functions and decorators which are useful to both submodules audio and video:

+
    +
  • Clip.py defines the base object for AudioClip and VideoClip and the simple methods that can be used by both, like clip.subclip, clip.set_duration, etc.
  • +
  • Files config.py and config_defaults.py store the default paths to the external programs FFMPEG and ImageMagick.
  • +
  • decorators.py provides very useful decorators that automate some tasks, like the fact that some effects, when applied to a clip, should also be applied to its mask, or to its audio track.
  • +
  • tools.py provides misc. functions that are useful everywhere in the library, like a standardized call to subprocess, a time converter, a standardized way to print messages in the console, etc.
  • +
  • editor.py is a helper module to easily load and initiate many functionalities of moviepy (see How to be efficient with MoviePy for more details)
  • +
+

The submodules moviepy.audio and moviepy.video are organized approximately the same way: at their root they implement base classes (respectively AudioClip and VideoClip) and they have the following submodules:

+
    +
  • io contains everything required to read files, write files, preview the clip or use a graphical interface of any sort. It contains the objects that talk to FFMPEG and ImageMagick, the classes AudioFileClip and VideoFileClip, the functions used to preview a clip with pygame or to embed a video in HTML5 (for instance in the IPython Notebook).
  • +
  • fx contains a collection of effects and filters (like turning a video black and white, correcting luminosity, zooming or creating a scrolling effect). To add an effect to MoviePy, you simply add a new file my_effect.py to this folder, and in the file you define the function my_effect(clip, *other_parameters) (see the sketch after this list).
  • +
  • compositing contains functions and classes to compose videoclips (CompositeVideoClip, concatenate_videoclips, clips_array)
  • +
  • tools contains advanced tools that are not effects but can help edit clips or generate new clips (tracking, subtitles, etc.)
  • +
+
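For instance, a minimal sketch of such an effect file; the name my_effect and the brightening logic are purely illustrative:

# moviepy/video/fx/my_effect.py  (hypothetical file)
import numpy as np

def my_effect(clip, gain=1.2):
    """Brighten every frame of the clip by multiplying it by gain."""
    def brighten(frame):
        # frames are HxWx3 uint8 arrays; clip values so they stay in 0-255
        return np.clip(frame.astype(float) * gain, 0, 255).astype("uint8")
    return clip.fl_image(brighten)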
diff --git a/v1.0.3/ref/decorators.html b/v1.0.3/ref/decorators.html
new file mode 100644
index 000000000..e95e0e453
--- /dev/null
+++ b/v1.0.3/ref/decorators.html
@@ -0,0 +1,301 @@

Decorators

+

These decorators are implemented to ease the writing of methods and effects in MoviePy

+

All the decorators used in MoviePy are defined here.

+
+
+moviepy.decorators.add_mask_if_none(f, clip, *a, **k)[source]
+

Add a mask to the clip if there is none.

+
+ +
+
+moviepy.decorators.apply_to_audio(f, clip, *a, **k)[source]
+

This decorator will apply the function f to the audio of +the clip created with f

+
+ +
+
+moviepy.decorators.apply_to_mask(f, clip, *a, **k)[source]
+

This decorator will apply the same function f to the mask of +the clip created with f

+
+ +
+
+moviepy.decorators.audio_video_fx(f, clip, *a, **k)[source]
+

Use an audio function on a video/audio clip

+

This decorator tells that the function f (audioclip -> audioclip) can also be used on a video clip, in which case it returns a videoclip with unmodified video and modified audio.

+
+ +
+
+moviepy.decorators.convert_masks_to_RGB(f, clip, *a, **k)[source]
+

If the clip is a mask, convert it to RGB before running the function

+
+ +
+
+moviepy.decorators.convert_to_seconds(varnames)[source]
+

Converts the specified variables to seconds

+
+ +
+
+moviepy.decorators.outplace(f, clip, *a, **k)[source]
+

Applies f to a copy of the clip and returns that copy, leaving the original clip unmodified.

+
+ +
+
+moviepy.decorators.preprocess_args(fun, varnames)[source]
+

Applies fun to variables in varnames before launching the function

+
+ +
+
+moviepy.decorators.requires_duration(f, clip, *a, **k)[source]
+

Raise an error if the clip has no duration.

+
+ +
+
+moviepy.decorators.use_clip_fps_by_default(f, clip, *a, **k)[source]
+

Will use clip.fps if no fps=… is provided in **k

+
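As a sketch of how these are used in practice, a hypothetical helper combining two of them might look like this (the function itself is illustrative):

from moviepy.decorators import convert_to_seconds, requires_duration

@convert_to_seconds(['t'])   # callers may pass 15.35, (1, 30) or '00:01:30'
@requires_duration           # raises an error if clip.duration is None
def elapsed_fraction(clip, t):
    """Hypothetical helper: fraction of the clip elapsed at time t."""
    return t / clip.duration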
+ +
diff --git a/v1.0.3/ref/ffmpeg.html b/v1.0.3/ref/ffmpeg.html
new file mode 100644
index 000000000..4fc5ebdd9
--- /dev/null
+++ b/v1.0.3/ref/ffmpeg.html
@@ -0,0 +1,269 @@

FFMPEG tools

+

Misc. bindings to ffmpeg and ImageMagick.

+
+
+moviepy.video.io.ffmpeg_tools.ffmpeg_extract_audio(inputfile, output, bitrate=3000, fps=44100)[source]
+

extract the sound from a video file and save it in output

+
+ +
+
+moviepy.video.io.ffmpeg_tools.ffmpeg_extract_subclip(filename, t1, t2, targetname=None)[source]
+

Makes a new video file playing video file filename between +the times t1 and t2.
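For instance, a quick sketch (file names are placeholders):

from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
# copy the segment between t=10s and t=20s into a new file
ffmpeg_extract_subclip("myvideo.mp4", 10, 20, targetname="cut.mp4")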

+
+ +
+
+moviepy.video.io.ffmpeg_tools.ffmpeg_merge_video_audio(video, audio, output, vcodec='copy', acodec='copy', ffmpeg_output=False, logger='bar')[source]
+

merges video file video and audio file audio into one +movie file output.

+
+ +
+
+moviepy.video.io.ffmpeg_tools.ffmpeg_movie_from_frames(filename, folder, fps, digits=6, bitrate='v')[source]
+

Writes a movie out of the frames (picture files) in a folder. +Almost deprecated.

+
+ +
+
+moviepy.video.io.ffmpeg_tools.ffmpeg_resize(video, output, size)[source]
+

Resizes video to the new size size and writes the result to the file output.

+
+ +
diff --git a/v1.0.3/ref/ref.html b/v1.0.3/ref/ref.html
new file mode 100644
index 000000000..89cc32606
--- /dev/null
+++ b/v1.0.3/ref/ref.html
@@ -0,0 +1,312 @@

Reference Manual

+

The documentation may be a little messy for the moment; it will get better with time. If you want to hack into the code or locate a particular function, read Organization of MoviePy's code.

+ +
diff --git a/v1.0.3/ref/videofx.html b/v1.0.3/ref/videofx.html
new file mode 100644
index 000000000..9df515b7d
--- /dev/null
+++ b/v1.0.3/ref/videofx.html
@@ -0,0 +1,383 @@

moviepy.video.fx (vfx)

+

The module moviepy.video.fx regroups functions meant to be used with videoclip.fx().

+

For all other modifications, we use clip.fx and clip.fl. clip.fx is meant to make it easy to use already-written transformation functions, while clip.fl makes it easy to write new transformation functions.

+

Because this module is starting to get large and will only get larger in the future, it allows two kinds of imports. You can either import a single function like this:

+
from moviepy.video.fx.crop import crop
+newclip = myclip.fx( crop, x1=15)
+
+
+

Or import everything:

+
import moviepy.video.fx.all as vfx
+newclip = (myclip.fx( vfx.crop, x1=15)
+                 .fx( vfx.resize, width=200)
+                 .fx( vfx.freeze, t='end', freeze_duration=1))
+
+
+

When you type:

+
from moviepy.editor import *
+
+
+

the module video.fx is loaded as vfx and you can use vfx.colorx, vfx.resize etc.

accel_decel: Accelerates and decelerates the clip to reach a given new_duration.
blackwhite: Desaturates the picture, makes it black and white.
blink: Makes the clip blink.
colorx: Multiplies the clip's colors by the given factor; can be used to decrease or increase the clip's brightness.
crop: Returns a new clip in which just a rectangular subregion of the original clip is conserved.
even_size: Crops the clip to make dimensions even.
fadein: Makes the clip progressively appear from some color (black by default), over duration seconds at the beginning of the clip.
fadeout: Makes the clip progressively fade to some color (black by default), over duration seconds at the end of the clip.
freeze: Momentarily freeze the clip at time t.
freeze_region: Freezes one region of the clip while the rest remains animated.
gamma_corr: Gamma-correction of a video clip.
headblur: Returns a filter that will blur a moving part (a head?) of the frames.
invert_colors: Returns the color-inversed clip.
loop: Returns a clip that plays the current clip in an infinite loop.
lum_contrast: Luminosity-contrast correction of a clip.
make_loopable: Makes the clip fade in progressively at its own end, this way it can be looped indefinitely.
margin: Draws an external margin all around the frame.
mask_and: Returns the logical 'and' (min) between two masks.
mask_color: Returns a new clip with a mask for transparency where the original clip is of the given color.
mask_or: Returns the logical 'or' (max) between two masks.
mirror_x: Flips the clip horizontally (and its mask too, by default).
mirror_y: Flips the clip vertically (and its mask too, by default).
painting: Transforms any photo into some kind of painting.
resize: Returns a video clip that is a resized version of the clip.
rotate: Rotates the clip by a given angle (change unit to 'rad' to define angles as radians).
scroll: Scrolls horizontally or vertically a clip, e.g. to make end credits.
speedx: Returns a clip playing the current clip but at a speed multiplied by factor.
supersample: Replaces each frame at time t by the mean of nframes equally spaced frames taken in the interval [t-d, t+d].
time_mirror: Returns a clip that plays the current clip backwards.
time_symmetrize: Returns a clip that plays the current clip once forwards and then once backwards.
+
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.accel_decel.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.accel_decel.html
new file mode 100644
index 000000000..801e045b7
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.accel_decel.html
@@ -0,0 +1,284 @@

moviepy.video.fx.all.accel_decel

+
+
+moviepy.video.fx.all.accel_decel(clip, new_duration=None, abruptness=1.0, soonness=1.0)[source]
+
new_duration
If None, will be that of the current clip.

abruptness
negative abruptness (> -1): speed up, down, up
zero abruptness: no effect
positive abruptness: speed down, up, down

soonness
for positive abruptness, determines how soon the speedup occurs (0 < soonness < inf)
+
+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.blackwhite.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.blackwhite.html
new file mode 100644
index 000000000..380b64f5d
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.blackwhite.html
@@ -0,0 +1,278 @@

moviepy.video.fx.all.blackwhite

+
+
+moviepy.video.fx.all.blackwhite(clip, RGB=None, preserve_luminosity=True)[source]
+

Desaturates the picture, makes it black and white. Parameter RGB allows to set weights for the different color channels. If RGB is 'CRT_phosphor' a special set of values is used. preserve_luminosity maintains the sum of RGB weights to 1.

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.blink.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.blink.html
new file mode 100644
index 000000000..91041dbae
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.blink.html
@@ -0,0 +1,276 @@

moviepy.video.fx.all.blink

+moviepy.video.fx.all.blink(clip, d_on, d_off)[source]

Makes the clip blink. At each blink it will be displayed d_on seconds and disappear d_off seconds. Will only work in composite clips.
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.colorx.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.colorx.html
new file mode 100644
index 000000000..23ee3e294
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.colorx.html
@@ -0,0 +1,276 @@

moviepy.video.fx.all.colorx

+
+
+moviepy.video.fx.all.colorx(clip, factor)[source]
+

Multiplies the clip's colors by the given factor; can be used to decrease or increase the clip's brightness.

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.crop.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.crop.html
new file mode 100644
index 000000000..58e1fa5c6
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.crop.html
@@ -0,0 +1,299 @@

moviepy.video.fx.all.crop

+
+
+moviepy.video.fx.all.crop(clip, x1=None, y1=None, x2=None, y2=None, width=None, height=None, x_center=None, y_center=None)[source]
+

Returns a new clip in which just a rectangular subregion of the original clip is conserved. x1,y1 indicates the top left corner and x2,y2 is the lower right corner of the cropped region. All coordinates are in pixels. Float numbers are accepted.

+

To crop an arbitrary rectangle:

+
>>> crop(clip, x1=50, y1=60, x2=460, y2=275)
+
+
+

Only remove the part above y=30:

+
>>> crop(clip, y1=30)
+
+
+

Crop a rectangle that starts 10 pixels left and is 200px wide

+
>>> crop(clip, x1=10, width=200)
+
+
+

Crop a rectangle centered in x,y=(300,400), width=50, height=150 :

+
>>> crop(clip,  x_center=300 , y_center=400,
+                    width=50, height=150)
+
+
+

Any combination of the above should work, like for this rectangle +centered in x=300, with explicit y-boundaries:

+
>>> crop(clip, x_center=300, width=400, y1=100, y2=600)
+
+
+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.even_size.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.even_size.html
new file mode 100644
index 000000000..09c104951
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.even_size.html
@@ -0,0 +1,274 @@

moviepy.video.fx.all.even_size

+
+
+moviepy.video.fx.all.even_size(clip)[source]
+

Crops the clip to make dimensions even.

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.fadein.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.fadein.html
new file mode 100644
index 000000000..e8c833f34
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.fadein.html
@@ -0,0 +1,278 @@

moviepy.video.fx.all.fadein

+
+
+moviepy.video.fx.all.fadein(clip, duration, initial_color=None)[source]
+

Makes the clip progressively appear from some color (black by default), +over duration seconds at the beginning of the clip. Can be used for +masks too, where the initial color must be a number between 0 and 1. +For cross-fading (progressive appearance or disappearance of a clip +over another clip, see composition.crossfade

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.fadeout.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.fadeout.html
new file mode 100644
index 000000000..213967400
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.fadeout.html
@@ -0,0 +1,278 @@

moviepy.video.fx.all.fadeout

+
+
+moviepy.video.fx.all.fadeout(clip, duration, final_color=None)[source]
+

Makes the clip progressively fade to some color (black by default), +over duration seconds at the end of the clip. Can be used for +masks too, where the final color must be a number between 0 and 1. +For cross-fading (progressive appearance or disappearance of a clip +over another clip, see composition.crossfade

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.freeze.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.freeze.html
new file mode 100644
index 000000000..603cf42c2
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.freeze.html
@@ -0,0 +1,280 @@

moviepy.video.fx.all.freeze

+
+
+moviepy.video.fx.all.freeze(clip, t=0, freeze_duration=None, total_duration=None, padding_end=0)[source]
+

Momentarily freeze the clip at time t.

+

Set t='end' to freeze the clip at the end (actually it will freeze on the frame at time clip.duration - padding_end seconds). With freeze_duration you can specify the duration of the freeze. With total_duration you can specify the total duration of the clip and the freeze (i.e. the duration of the freeze is automatically calculated). One of them must be provided.

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.freeze_region.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.freeze_region.html
new file mode 100644
index 000000000..00a40a624
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.freeze_region.html
@@ -0,0 +1,302 @@

moviepy.video.fx.all.freeze_region

+
+
+moviepy.video.fx.all.freeze_region(clip, t=0, region=None, outside_region=None, mask=None)[source]
+

Freezes one region of the clip while the rest remains animated.

+

You can choose one of three methods by providing either region, +outside_region, or mask.

+ +++ + + + +
Parameters:
+
t
+

Time at which to freeze the frozen region.

+
+
region
+

A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels) which will be frozen. You can provide outside_region or mask instead.

+
+
outside_region
+

A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels) which will be the only non-frozen region.

+
+
mask
+

If not None, will overlay a frozen version of the clip on the current clip, with the provided mask. In other words, the "visible" pixels in the mask indicate the frozen region in the final picture.

+
+
+
+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.gamma_corr.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.gamma_corr.html
new file mode 100644
index 000000000..09b34b893
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.gamma_corr.html
@@ -0,0 +1,274 @@

moviepy.video.fx.all.gamma_corr

+
+
+moviepy.video.fx.all.gamma_corr(clip, gamma)[source]
+

Gamma-correction of a video clip

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.headblur.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.headblur.html
new file mode 100644
index 000000000..bda4a59cc
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.headblur.html
@@ -0,0 +1,280 @@

moviepy.video.fx.all.headblur

+
+
+moviepy.video.fx.all.headblur(clip, fx, fy, r_zone, r_blur=None)[source]
+

Returns a filter that will blur a moving part (a head?) of the frames. The position of the blur at time t is defined by (fx(t), fy(t)), the radius of the blurring by r_zone and the intensity of the blurring by r_blur. Requires OpenCV for the circling and the blurring. Automatically deals with the case where part of the image goes offscreen.

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.invert_colors.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.invert_colors.html
new file mode 100644
index 000000000..4e6ad37d9
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.invert_colors.html
@@ -0,0 +1,276 @@

moviepy.video.fx.all.invert_colors

+
+
+moviepy.video.fx.all.invert_colors(clip)[source]
+

Returns the color-inversed clip.

+

The values of all pixels are replaced with (255-v) or (1-v) for masks +Black becomes white, green becomes purple, etc.

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.loop.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.loop.html
new file mode 100644
index 000000000..e6cf68ec8
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.loop.html
@@ -0,0 +1,292 @@

moviepy.video.fx.all.loop

+
+
+moviepy.video.fx.all.loop(self, n=None, duration=None)[source]
+

Returns a clip that plays the current clip in an infinite loop. +Ideal for clips coming from gifs.

+ +++ + + + +
Parameters:
+
n
+

Number of times the clip should be played. If None, the clip will loop indefinitely (i.e. with no set duration).

+
+
duration
+

Total duration of the clip. Can be specified instead of n.

+
+
+
+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.lum_contrast.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.lum_contrast.html
new file mode 100644
index 000000000..508a930f1
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.lum_contrast.html
@@ -0,0 +1,274 @@

moviepy.video.fx.all.lum_contrast

+
+
+moviepy.video.fx.all.lum_contrast(clip, lum=0, contrast=0, contrast_thr=127)[source]
+

luminosity-contrast correction of a clip

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.make_loopable.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.make_loopable.html
new file mode 100644
index 000000000..785ce3b21
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.make_loopable.html
@@ -0,0 +1,276 @@

moviepy.video.fx.all.make_loopable

+
+
+moviepy.video.fx.all.make_loopable(clip, cross)[source]
+

Makes the clip fade in progressively at its own end, this way +it can be looped indefinitely. cross is the duration in seconds +of the fade-in.

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.margin.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.margin.html
new file mode 100644
index 000000000..fd4435982
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.margin.html
@@ -0,0 +1,291 @@

moviepy.video.fx.all.margin

+
+
+moviepy.video.fx.all.margin(clip, mar=None, left=0, right=0, top=0, bottom=0, color=(0, 0, 0), opacity=1.0)[source]
+

Draws an external margin all around the frame.

Parameters:

  • mar – if not None, then the new clip has a margin of size mar in pixels on the left, right, top, and bottom.
  • left, right, top, bottom – width of the margin in pixels in these directions.
  • color – color of the margin.
  • opacity – value of the mask on the margin. Setting this value to 0 yields transparent margins.
+
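For instance, a quick sketch (the margin size and color are illustrative):

>>> framed = margin(clip, mar=10, color=(255, 255, 255))  # 10-pixel white frame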
+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.mask_and.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.mask_and.html
new file mode 100644
index 000000000..882297dae
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.mask_and.html
@@ -0,0 +1,276 @@

moviepy.video.fx.all.mask_and

+
+
+moviepy.video.fx.all.mask_and(clip, other_clip)[source]
+

Returns the logical ‘and’ (min) between two masks. +other_clip can be a mask clip or a picture (np.array). +The result has the duration of ‘clip’ (if it has any)

+
+ +
diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.mask_color.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.mask_color.html
new file mode 100644
index 000000000..d90c3a886
--- /dev/null
+++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.mask_color.html
@@ -0,0 +1,281 @@

moviepy.video.fx.all.mask_color

+
+
+moviepy.video.fx.all.mask_color(clip, color=None, thr=0, s=1)[source]
+

Returns a new clip with a mask for transparency where the original +clip is of the given color.

+

You can also have a “progressive” mask by specifying a non-zero distance threshold thr. In this case, if the distance between a pixel and the given color is d, the transparency will be

+

d**s / (thr**s + d**s)

+

which is 1 when d >> thr and 0 when d << thr, the stiffness of the effect being parameterized by s.
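A quick numeric check of the formula (values chosen only for illustration): with thr=100 and s=1, a pixel at distance d=100 gets transparency 100/(100+100) = 0.5, while d=300 gives 300/(100+300) = 0.75. A green-screen usage sketch:
>>> from moviepy.editor import VideoFileClip
+>>> from moviepy.video.fx.all import mask_color
+>>> clip = VideoFileClip("greenscreen.mp4")  # hypothetical footage
+>>> keyed = clip.fx(mask_color, color=[0, 255, 0], thr=100, s=5)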

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.mask_or.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.mask_or.html new file mode 100644 index 000000000..0a1d0c511 --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.mask_or.html @@ -0,0 +1,276 @@ + + + + + + + + + + moviepy.video.fx.all.mask_or — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.mask_or

+
+
+moviepy.video.fx.all.mask_or(clip, other_clip)[source]
+

Returns the logical ‘or’ (max) between two masks. other_clip can be a mask clip or a picture (np.array). The result has the duration of ‘clip’ (if it has one).
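A minimal sketch, symmetric to mask_and (the mask clips are hypothetical):
>>> from moviepy.video.fx.all import mask_or
+>>> # keep the pixels that are opaque in *either* mask
+>>> combined = mask_or(mask_a, mask_b)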

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.mirror_x.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.mirror_x.html new file mode 100644 index 000000000..96ade7be1 --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.mirror_x.html @@ -0,0 +1,274 @@ + + + + + + + + + + moviepy.video.fx.all.mirror_x — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.mirror_x

+
+
+moviepy.video.fx.all.mirror_x(clip, apply_to='mask')[source]
+

Flips the clip horizontally (and its mask too, by default).

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.mirror_y.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.mirror_y.html new file mode 100644 index 000000000..40434f0aa --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.mirror_y.html @@ -0,0 +1,274 @@ + + + + + + + + + + moviepy.video.fx.all.mirror_y — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.mirror_y

+
+
+moviepy.video.fx.all.mirror_y(clip, apply_to='mask')[source]
+

Flips the clip vertically (and its mask too, by default).

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.painting.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.painting.html new file mode 100644 index 000000000..bc5116176 --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.painting.html @@ -0,0 +1,277 @@ + + + + + + + + + + moviepy.video.fx.all.painting — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.painting

+
+
+moviepy.video.fx.all.painting(clip, saturation=1.4, black=0.006)[source]
+

Transforms any photo into some kind of painting. saturation tells how flashy the colors of the result should be; black gives the amount of black lines wanted. Requires Scikit-image or Scipy to be installed.
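A usage sketch, assuming the saturation/black parameters shown in the signature (values are the documented defaults):
>>> from moviepy.editor import VideoFileClip
+>>> from moviepy.video.fx.all import painting
+>>> clip = VideoFileClip("myvideo.mp4")
+>>> painted = clip.fx(painting, saturation=1.4, black=0.006)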

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.resize.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.resize.html new file mode 100644 index 000000000..19d5b0dc1 --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.resize.html @@ -0,0 +1,310 @@ + + + + + + + + + + moviepy.video.fx.all.resize — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.resize

+
+
+moviepy.video.fx.all.resize(clip, newsize=None, height=None, width=None, apply_to_mask=True)[source]
+

Returns a video clip that is a resized version of the clip.

+ +++ + + + +
Parameters:
+
newsize:
+
+
Can be either
+
    +
  • A couple (width, height) in pixels
  • +
  • A scaling factor, like 0.5
  • +
  • A function of time returning one of these.
  • +
+
+
+
+
width:
+

Width of the new clip in pixels. The height is then computed so that the width/height ratio is preserved.

+
+
height:
+

Height of the new clip in pixels. The width is then computed so that the width/height ratio is preserved.

+
+
+
+

Examples

+
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
+>>> myClip.resize(0.6) # width and height multiplied by 0.6
+>>> myClip.resize(width=800) # height computed automatically.
+>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
+
+
+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.rotate.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.rotate.html new file mode 100644 index 000000000..9dc71de5f --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.rotate.html @@ -0,0 +1,304 @@ + + + + + + + + + + moviepy.video.fx.all.rotate — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.rotate

+
+
+moviepy.video.fx.all.rotate(clip, angle, unit='deg', resample='bicubic', expand=True)[source]
+

Change unit to ‘rad’ to define angles as radians. +If the angle is not one of 90, 180, -90, -180 (degrees) there will be +black borders. You can make them transparent with

+
>>> newclip = clip.add_mask().rotate(72)
+
+
+ +++ + + + +
Parameters:
+
clip
+

A video clip

+
+
angle
+

Either a value or a function angle(t) representing the angle of rotation

+
+
unit
+

Unit of parameter angle (either deg for degrees or rad for radians)

+
+
resample
+

One of “nearest”, “bilinear”, or “bicubic”.

+
+
expand
+

If False, the clip keeps its original size and the parts of the rotated frame that do not fit are cropped. If True, the clip is resized so that the whole rotated frame fits.

+
+
+
+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.scroll.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.scroll.html new file mode 100644 index 000000000..ab8fe8f10 --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.scroll.html @@ -0,0 +1,275 @@ + + + + + + + + + + moviepy.video.fx.all.scroll — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.scroll

+
+
+moviepy.video.fx.all.scroll(clip, h=None, w=None, x_speed=0, y_speed=0, x_start=0, y_start=0, apply_to='mask')[source]
+

Scrolls a clip horizontally or vertically, e.g. to make end credits.
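A rolling-credits sketch (file name, window height, and speed are illustrative): a 360-pixel-high window slides down a tall still image at 30 px/s, so the content appears to scroll upward.
>>> from moviepy.editor import ImageClip
+>>> from moviepy.video.fx.all import scroll
+>>> credits_img = ImageClip("credits.png").set_duration(20)
+>>> rolling = scroll(credits_img, h=360, y_speed=30)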

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.speedx.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.speedx.html new file mode 100644 index 000000000..f2280702c --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.speedx.html @@ -0,0 +1,278 @@ + + + + + + + + + + moviepy.video.fx.all.speedx — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.speedx

+
+
+moviepy.video.fx.all.speedx(clip, factor=None, final_duration=None)[source]
+

Returns a clip playing the current clip but at a speed multiplied +by factor. Instead of factor one can indicate the desired +final_duration of the clip, and the factor will be automatically +computed. +The same effect is applied to the clip’s audio and mask if any.
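Two usage sketches on an arbitrary clip:
>>> from moviepy.video.fx.all import speedx
+>>> doubled = clip.fx(speedx, 2)                    # play twice as fast
+>>> ten_sec = clip.fx(speedx, final_duration=10)    # factor computed automatically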

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.supersample.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.supersample.html new file mode 100644 index 000000000..16ca3f8c7 --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.supersample.html @@ -0,0 +1,275 @@ + + + + + + + + + + moviepy.video.fx.all.supersample — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.supersample

+
+
+moviepy.video.fx.all.supersample(clip, d, nframes)[source]
+

Replaces each frame at time t by the mean of nframes equally spaced frames +taken in the interval [t-d, t+d]. This results in motion blur.
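A motion-blur sketch: each output frame averages 5 frames drawn from a ±0.05 s window around t (values are illustrative):
>>> from moviepy.video.fx.all import supersample
+>>> blurred = clip.fx(supersample, d=0.05, nframes=5)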

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.time_mirror.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.time_mirror.html new file mode 100644 index 000000000..8b7b427fe --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.time_mirror.html @@ -0,0 +1,276 @@ + + + + + + + + + + moviepy.video.fx.all.time_mirror — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.time_mirror

+
+
+moviepy.video.fx.all.time_mirror(self)[source]
+

Returns a clip that plays the current clip backwards. +The clip must have its duration attribute set. +The same effect is applied to the clip’s audio and mask if any.

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videofx/moviepy.video.fx.all.time_symmetrize.html b/v1.0.3/ref/videofx/moviepy.video.fx.all.time_symmetrize.html new file mode 100644 index 000000000..526a967f7 --- /dev/null +++ b/v1.0.3/ref/videofx/moviepy.video.fx.all.time_symmetrize.html @@ -0,0 +1,278 @@ + + + + + + + + + + moviepy.video.fx.all.time_symmetrize — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

moviepy.video.fx.all.time_symmetrize

+
+
+moviepy.video.fx.all.time_symmetrize(clip)[source]
+

Returns a clip that plays the current clip once forwards and then once backwards. This is very practical for making videos that loop well, e.g. to create animated GIFs. This effect is automatically applied to the clip’s mask and audio if they exist.
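A ping-pong GIF sketch (the clip must have a duration set):
>>> from moviepy.video.fx.all import time_symmetrize
+>>> clip.fx(time_symmetrize).write_gif("pingpong.gif")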

+
+ +
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/ref/videotools.html b/v1.0.3/ref/videotools.html new file mode 100644 index 000000000..370da09fa --- /dev/null +++ b/v1.0.3/ref/videotools.html @@ -0,0 +1,1198 @@ + + + + + + + + + + video.tools — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

video.tools

+

This module regroups advanced, useful (and less useful) functions for editing videos, listed in alphabetical order.

+
+

Credits

+

This module contains different functions to make end and opening credits, even though it is difficult to fill everyone’s needs in this matter.

+
+
+moviepy.video.tools.credits.credits1(creditfile, width, stretch=30, color='white', stroke_color='black', stroke_width=2, font='Impact-Normal', fontsize=60, gap=0)[source]
+
+++ + + + + + +
Parameters:
+
creditfile
+

A text file whose content must be as follows:

+
# This is a comment
+# The next line says : leave 4 blank lines
+.blank 4
+
+..Executive Story Editor
+MARCEL DURAND
+
+..Associate Producers
+MARTIN MARCEL
+DIDIER MARTIN
+
+..Music Supervisor
+JEAN DIDIER
+
+
+
+
width
+

Total width of the credits text in pixels

+
+
gap
+

Horizontal gap in pixels between the jobs and the names

+
+
color
+

Color of the text. See TextClip.list('color') +for a list of acceptable names.

+
+
font
+

Name of the font to use. See TextClip.list('font') for +the list of fonts you can use on your computer.

+
+
fontsize
+

Size of font to use

+
+
stroke_color
+

Color of the stroke (=contour line) of the text. If None, +there will be no stroke.

+
+
stroke_width
+

Width of the stroke, in pixels. Can be a float, like 1.5.

+
+
+
Returns:
+
image
+

An ImageClip instance that looks like this and can be scrolled +to make some credits:

+
+
+
Executive Story Editor MARCEL DURAND
+
+
Associate Producers MARTIN MARCEL
+
+

DIDIER MARTIN

+
+

Music Supervisor JEAN DIDIER

+
+
+
+
+
+
+
+
+
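A sketch of building and rolling the credits (the credit file path, width, duration, and speed are hypothetical):
>>> from moviepy.video.tools.credits import credits1
+>>> from moviepy.video.fx.all import scroll
+>>> credits_clip = credits1("credits.txt", width=600).set_duration(20)
+>>> rolling = scroll(credits_clip, h=400, y_speed=30)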
+ +
+
+

Drawing

+

This module deals with making images (np arrays). It provides drawing +methods that are difficult to do with the existing Python libraries.

+
+
+moviepy.video.tools.drawing.blit(im1, im2, pos=None, mask=None, ismask=False)[source]
+

Blit an image over another.

+

Blits im1 on im2 at position pos=(x,y), using the mask if provided. If im1 and im2 are mask pictures (2D float arrays) then ismask must be True.
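A minimal NumPy sketch, assuming blit returns the composited array: paste a small white patch onto a black background.
>>> import numpy as np
+>>> from moviepy.video.tools.drawing import blit
+>>> background = np.zeros((100, 100, 3))
+>>> patch = 255 * np.ones((20, 20, 3))
+>>> composited = blit(patch, background, pos=(40, 30))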

+
+ +
+
+moviepy.video.tools.drawing.circle(screensize, center, radius, col1=1.0, col2=0, blur=1)[source]
+

Draw an image with a circle.

+

Draws a circle of color col1, on a background of color col2, +on a screen of size screensize at the position center=(x,y), +with a radius radius but slightly blurred on the border by blur +pixels
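A vignette-style mask sketch: an opaque disc, feathered over 30 pixels at the border (dimensions illustrative):
>>> from moviepy.video.tools.drawing import circle
+>>> vignette = circle((640, 360), center=(320, 180), radius=150,
+...                   col1=1.0, col2=0, blur=30)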

+
+ +
+
+moviepy.video.tools.drawing.color_gradient(size, p1, p2=None, vector=None, r=None, col1=0, col2=1.0, shape='linear', offset=0)[source]
+

Draw a linear, bilinear, or radial gradient.

+

The result is a picture of size size, whose color varies +gradually from color col1 in position p1 to color col2 +in position p2.

+

If it is an RGB picture, the result must be transformed into a ‘uint8’ array to be displayed normally:

+ +++ + + + + + +
Parameters:
+
size
+

Size (width, height) in pixels of the final picture/array.

+
+
p1, p2
+

Coordinates (x,y) in pixels of the limit point for col1 +and col2. The color ‘before’ p1 is col1 and it +gradually changes in the direction of p2 until it is col2 +when it reaches p2.

+
+
vector
+

A vector [x,y] in pixels that can be provided instead of p2. +p2 is then defined as (p1 + vector).

+
+
col1, col2
+

Either floats between 0 and 1 (for gradients used in masks) +or [R,G,B] arrays (for colored gradients).

+
+
shape
+

‘linear’, ‘bilinear’, or ‘circular’. In a linear gradient the color varies in one direction, from point p1 to point p2. In a bilinear gradient it also varies symmetrically from p1 in the other direction. In a circular gradient it goes from col1 to col2 in all directions.

+
+
offset
+

Real number between 0 and 1 indicating the fraction of the vector +at which the gradient actually starts. For instance if offset +is 0.9 in a gradient going from p1 to p2, then the gradient will +only occur near p2 (before that everything is of color col1) +If the offset is 0.9 in a radial gradient, the gradient will +occur in the region located between 90% and 100% of the radius, +this creates a blurry disc of radius d(p1,p2).

+
+
+
Returns:
+
image
+

A Numpy array of dimensions (W,H,ncolors) of type float representing the image of the gradient.

+
+
+
+

Examples

+
>>> grad = color_gradient((200, 100), p1=(0, 50), p2=(200, 50), col1=[0, 0, 0], col2=[255, 255, 255]).astype('uint8')
+
+
+
+ +
+
+moviepy.video.tools.drawing.color_split(size, x=None, y=None, p1=None, p2=None, vector=None, col1=0, col2=1.0, grad_width=0)[source]
+

Make an image split into two colored regions.

+

Returns an array of size size divided into two regions, called 1 and 2 in what follows, which will have colors col1 and col2 respectively.

+ +++ + + + +
Parameters:
+
x: (int)
+

If provided, the image is split horizontally at x, the left region being region 1.

+
+
y: (int)
+

If provided, the image is split vertically at y, the top region being region 1.

+
+
p1,p2:
+

Positions (x1,y1),(x2,y2) in pixels, where the numbers can be +floats. Region 1 is defined as the whole region on the left when +going from p1 to p2.

+
+
p1, vector:
+

p1 is (x1,y1) and vector (v1,v2), where the numbers can be +floats. Region 1 is then the region on the left when starting +in position p1 and going in the direction given by vector.

+
+
grad_width
+

If not zero, the split is not sharp, but gradual over a region of width grad_width (in pixels). This is preferable in many situations (for instance for antialiasing).

+
+
+
+

Examples

+
>>> size = [200,200]
+>>> # an image with all pixels with x<50 =0, the others =1
+>>> color_split(size, x=50, col1=0, col2=1)
+>>> # an image with all pixels with x<50 red, the others green
+>>> color_split(size, x=50, col1=[255,0,0], col2=[0,255,0])
+>>> # An image split along an arbitrary line (see below)
+>>> color_split(size, p1=[20,50], p2=[25,70], col1=0, col2=1)
+
+
+
+ +
+
+

Segmenting

+
+
+

Subtitles

+

Experimental module for subtitles support.

+
+
+class moviepy.video.tools.subtitles.SubtitlesClip(subtitles, make_textclip=None)[source]
+

Bases: moviepy.video.VideoClip.VideoClip

+

A Clip that serves as “subtitle track” in videos.

+

One particularity of this class is that the images of the subtitle texts are not generated beforehand, but only when needed.

+ +++ + + + +
Parameters:
+
subtitles
+

Either the name of a file, or a list

+
+
+
+

Examples

+
>>> from moviepy.video.tools.subtitles import SubtitlesClip
+>>> from moviepy.video.io.VideoFileClip import VideoFileClip
+>>> from moviepy.editor import TextClip, CompositeVideoClip
+>>> generator = lambda txt: TextClip(txt, font='Georgia-Regular', fontsize=24, color='white')
+>>> sub = SubtitlesClip("subtitles.srt", generator)
+>>> myvideo = VideoFileClip("myvideo.avi")
+>>> final = CompositeVideoClip([myvideo, sub])
+>>> final.write_videofile("final.mp4", fps=myvideo.fps)
+
+
+
+
+add_mask(self)
+

Add a mask VideoClip to the VideoClip.

+

Returns a copy of the clip with a completely opaque mask (made of ones). This makes computations slower compared to having a None mask but can be useful in many cases.

+

Set constant_size to False for clips with moving +image size.

+
+ +
+
+afx(self, fun, *a, **k)
+

Transform the clip’s audio.

+

Return a new clip whose audio has been transformed by fun.

+
+ +
+
+blit_on(self, picture, t)
+

Returns the result of the blit of the clip’s frame at time t +on the given picture, the position of the clip being given +by the clip’s pos attribute. Meant for compositing.

+
+ +
+
+close(self)
+

Release any resources that are in use.

+
+ +
+
+copy(self)
+

Shallow copy of the clip.

+

Returns a shallow copy of the clip whose mask and audio will +be shallow copies of the clip’s mask and audio if they exist.

+

This method is intensively used to produce new clips every time +there is an outplace transformation of the clip (clip.resize, +clip.subclip, etc.)

+
+ +
+
+cutout(self, ta, tb)
+

Returns a clip playing the content of the current clip but skips the extract between ta and tb, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If the original clip has a duration attribute set, the duration of the returned clip is automatically computed as ``duration - (tb - ta)``.

+

The resulting clip’s audio and mask will also be cutout +if they exist.

+
+ +
+
+fl(self, fun, apply_to=None, keep_duration=True)
+

General processing of a clip.

+

Returns a new Clip whose frames are a transformation +(through function fun) of the frames of the current clip.

+ +++ + + + +
Parameters:
+
fun
+

A function with signature (gf,t -> frame) where gf will +represent the current clip’s get_frame method, +i.e. gf is a function (t->image). Parameter t is a time +in seconds, frame is a picture (=Numpy array) which will be +returned by the transformed clip (see examples below).

+
+
apply_to
+

Can be either 'mask', or 'audio', or +['mask','audio']. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration
+

Set to True if the transformation does not change the +duration of the clip.

+
+
+
+

Examples

+

In the following, newclip is a 100-pixels-high clip whose video content scrolls from the top to the bottom of the frames of clip.

+
>>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :]
+>>> newclip = clip.fl(fl, apply_to='mask')
+
+
+
+ +
+
+fl_image(self, image_func, apply_to=None)
+

Modifies the images of a clip by replacing the frame +get_frame(t) by another frame, image_func(get_frame(t))

+
+ +
+
+fl_time(self, t_func, apply_to=None, keep_duration=False)
+

Returns a Clip instance playing the content of the current clip +but with a modified timeline, time t being replaced by another +time t_func(t).

+ +++ + + + +
Parameters:
+
t_func:
+

A function t-> new_t

+
+
apply_to:
+

Can be either ‘mask’, or ‘audio’, or [‘mask’,’audio’]. +Specifies if the filter fl should also be applied to the +audio or the mask of the clip, if any.

+
+
keep_duration:
+

False (default) if the transformation modifies the +duration of the clip.

+
+
+
+

Examples

+
>>> # plays the clip (and its mask and sound) twice faster
+>>> newclip = clip.fl_time(lambda t: 2*t, apply_to=['mask', 'audio'])
+>>>
+>>> # plays the clip starting at t=3, and backwards:
+>>> newclip = clip.fl_time(lambda t: 3-t)
+
+
+
+ +
+
+fx(self, func, *args, **kwargs)
+

Returns the result of func(self, *args, **kwargs). For instance

+
>>> newclip = clip.fx(resize, 0.2, method='bilinear')
+
+
+

is equivalent to

+
>>> newclip = resize(clip, 0.2, method='bilinear')
+
+
+

The motivation of fx is to keep the name of the effect near its +parameters, when the effects are chained:

+
>>> from moviepy.video.fx import volumex, resize, mirrorx
+>>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx )
+>>> # Is equivalent, but clearer than
+>>> resize( volumex( mirrorx( clip ), 0.5), 0.3)
+
+
+
+ +
+
+get_frame(self, t)
+

Gets a numpy array representing the RGB picture of the clip at time t +or (mono or stereo) value for a sound clip

+
+ +
+
+in_subclip(self, t_start=None, t_end=None)[source]
+

Returns a sequence of [(t1,t2), txt] covering the given subclip from t_start to t_end. The first and last times will be cropped so as to be exactly t_start and t_end if possible.

+
+ +
+
+is_playing(self, t)
+

If t is a time, returns true if t is between the start and the end of the clip. t can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t is a numpy array, returns False if none of the t is in the clip, else returns a vector [b_1, b_2, b_3…] where b_i is true iff t_i is in the clip.

+
+ +
+
+iter_frames(self, fps=None, with_times=False, logger=None, dtype=None)
+

Iterates over all the frames of the clip.

+

Returns each frame of the clip as a HxWxN np.array, +where N=1 for mask clips and N=3 for RGB clips.

+

This function is not really meant for video editing. +It provides an easy way to do frame-by-frame treatment of +a video, for fields like science, computer vision…

+

The fps (frames per second) parameter is optional if the +clip already has a fps attribute.

+

Use dtype=”uint8” when using the pictures to write video, images…

+

Examples

+
>>> # prints the maximum of red that is contained
+>>> # on the first line of each frame of the clip.
+>>> from moviepy.editor import VideoFileClip
+>>> myclip = VideoFileClip('myvideo.mp4')
+>>> print ( [frame[0,:,0].max()
+             for frame in myclip.iter_frames()])
+
+
+
+ +
+
+on_color(self, size=None, color=(0, 0, 0), pos=None, col_opacity=None)
+

Place the clip on a colored background.

+

Returns a clip made of the current clip overlaid on a color +clip of a possibly bigger size. Can serve to flatten transparent +clips.

+ +++ + + + +
Parameters:
+
size
+

Size (width, height) in pixels of the final clip. +By default it will be the size of the current clip.

+
+
color
+

Background color of the final clip ([R,G,B]).

+
+
pos
+

Position of the clip in the final clip. ‘center’ is the default.

+
+
col_opacity
+

Parameter in 0..1 indicating the opacity of the colored +background.

+
+
+
+
+ +
+
+save_frame(self, filename, t=0, withmask=True)
+

Save a clip’s frame to an image file.

+

Saves the frame of clip corresponding to time t in +‘filename’. t can be expressed in seconds (15.35), in +(min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If withmask is True the mask is saved in +the alpha layer of the picture (only works with PNGs).

+
+ +
+
+set_audio(self, audioclip)
+

Attach an AudioClip to the VideoClip.

+

Returns a copy of the VideoClip instance, with the audio +attribute set to audio, which must be an AudioClip instance.

+
+ +
+
+set_duration(self, t, change_end=True)
+

Returns a copy of the clip, with the duration attribute set to t, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. Also sets the duration of the mask and audio, if any, of the returned clip. If change_end is False, the start attribute of the clip will be modified to match the new duration and the preset end of the clip.

+
+ +
+
+set_end(self, t)
+

Returns a copy of the clip, with the end attribute set to +t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’. +Also sets the duration of the mask and audio, if any, +of the returned clip.

+
+ +
+
+set_fps(self, fps)
+

Returns a copy of the clip with a new default fps for functions like write_videofile, iter_frames, etc.

+
+ +
+
+set_ismask(self, ismask)
+

Says whether the clip is a mask or not (ismask is a boolean).

+
+ +
+
+set_make_frame(self, mf)
+

Change the clip’s get_frame.

+

Returns a copy of the VideoClip instance, with the make_frame +attribute set to mf.

+
+ +
+
+set_mask(self, mask)
+

Set the clip’s mask.

+

Returns a copy of the VideoClip with the mask attribute set to +mask, which must be a greyscale (values in 0-1) VideoClip

+
+ +
+
+set_memoize(self, memoize)
+

Sets whether the clip should keep the last frame read in memory.

+
+ +
+
+set_opacity(self, op)
+

Set the opacity/transparency level of the clip.

+

Returns a semi-transparent copy of the clip where the mask is +multiplied by op (any float, normally between 0 and 1).

+
+ +
+
+set_pos(*a, **kw)
+

The function set_pos is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, set_position, instead.

+
+ +
+
+set_position(self, pos, relative=False)
+

Set the clip’s position in compositions.

+

Sets the position that the clip will have when included +in compositions. The argument pos can be either a couple +(x,y) or a function t-> (x,y). x and y mark the +location of the top left corner of the clip, and can be +of several types.

+

Examples

+
>>> clip.set_position((45,150)) # x=45, y=150
+>>>
+>>> # clip horizontally centered, at the top of the picture
+>>> clip.set_position(("center","top"))
+>>>
+>>> # clip is at 40% of the width, 70% of the height:
+>>> clip.set_position((0.4,0.7), relative=True)
+>>>
+>>> # clip's position is horizontally centered, and moving up !
+>>> clip.set_position(lambda t: ('center', 50+t) )
+
+
+
+ +
+
+set_start(self, t, change_end=True)
+

Returns a copy of the clip, with the start attribute set +to t, which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+

If change_end=True and the clip has a duration attribute, the end attribute of the clip will be updated to start+duration.

+

If change_end=False and the clip has a end attribute, +the duration attribute of the clip will be updated to +end-start

+

These changes are also applied to the audio and mask +clips of the current clip, if they exist.

+
+ +
+
+subclip(self, t_start=0, t_end=None)
+

Returns a clip playing the content of the current clip between times t_start and t_end, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: ‘01:03:05.35’. If t_end is not provided, it is assumed to be the duration of the clip (potentially infinite). If t_end is a negative value, it is reset to ``clip.duration + t_end``. For instance:

+
>>> # cut the last two seconds of the clip:
+>>> newclip = clip.subclip(0,-2)
+
+
+

If t_end is provided or if the clip has a duration attribute, +the duration of the returned clip is set automatically.

+

The mask and audio of the resulting subclip will be +subclips of mask and audio the original clip, if +they exist.

+
+ +
+
+subfx(self, fx, ta=0, tb=None, **kwargs)
+

Apply a transformation to a part of the clip.

+

Returns a new clip in which the function fun (clip->clip) +has been applied to the subclip between times ta and tb +(in seconds).

+

Examples

+
>>> # The scene between times t=3s and t=6s in ``clip`` will be
+>>> # played twice slower in ``newclip``
+>>> newclip = clip.subfx(lambda c: c.speedx(0.5), 3, 6)
+
+
+
+ +
+
+to_ImageClip(self, t=0, with_mask=True, duration=None)
+

Returns an ImageClip made out of the clip’s frame at time t, +which can be expressed in seconds (15.35), in (min, sec), +in (hour, min, sec), or as a string: ‘01:03:05.35’.

+
+ +
+
+to_RGB(self)
+

Return a non-mask video clip made from the mask video clip.

+
+ +
+
+to_gif(*a, **kw)
+

The function to_gif is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_gif, instead.

+
+ +
+
+to_images_sequence(*a, **kw)
+

The function to_images_sequence is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_images_sequence, instead.

+
+ +
+
+to_mask(self, canal=0)
+

Return a mask video clip made from the clip.

+
+ +
+
+to_videofile(*a, **kw)
+

The function to_videofile is deprecated and is kept temporarily for backwards compatibility. +Please use the new name, write_videofile, instead.

+
+ +
+
+without_audio(self)
+

Remove the clip’s audio.

+

Return a copy of the clip with audio set to None.

+
+ +
+
+write_gif(self, filename, fps=None, program='imageio', opt='nq', fuzz=1, verbose=True, loop=0, dispose=False, colors=None, tempfiles=False, logger='bar')
+

Write the VideoClip to a GIF file.

+

Converts a VideoClip into an animated GIF using ImageMagick +or ffmpeg.

+ +++ + + + +
Parameters:
+
filename
+

Name of the resulting gif file.

+
+
fps
+

Number of frames per second (see note below). If it isn’t provided, then the function will look for the clip’s fps attribute (VideoFileClip, for instance, has one).

+
+
program
+

Software to use for the conversion, either ‘imageio’ (this will use +the library FreeImage through ImageIO), or ‘ImageMagick’, or ‘ffmpeg’.

+
+
opt
+

Optimization to apply. If program=’imageio’, opt must be either ‘wu’ (Wu) or ‘nq’ (Neuquant). If program=’ImageMagick’, either ‘optimizeplus’ or ‘OptimizeTransparency’.

+
+
fuzz
+

(ImageMagick only) Compresses the GIF by considering that +the colors that are less than fuzz% different are in fact +the same.

+
+
tempfiles
+

Writes every frame to a file instead of passing them in RAM. Useful on computers with little RAM. Can only be used with ‘ImageMagick’ or ‘ffmpeg’.

+
+
progress_bar
+

If True, displays a progress bar

+
+
+
+

Notes

+

The gif will be playing the clip in real time (you can only change the frame rate). If you want the gif to be played slower than the clip, you can use, for instance:

+
>>> # slow down clip 50% and make it a gif
+>>> myClip.speedx(0.5).write_gif('myClip.gif')
+
+
+
+ +
+
+write_images_sequence(self, nameformat, fps=None, verbose=True, withmask=True, logger='bar')
+

Writes the videoclip to a sequence of image files.

+ +++ + + + + + +
Parameters:
+
nameformat
+

A filename specifying the numbering format and extension of the pictures. For instance “frame%03d.png” for filenames indexed with 3 digits and PNG format. Also possible: “some_folder/frame%04d.jpeg”, etc.

+
+
fps
+

Number of frames per second to consider when writing the +clip. If not specified, the clip’s fps attribute will +be used if it has one.

+
+
withmask
+

Will save the clip’s mask (if any) as an alpha channel (PNGs only).

+
+
verbose
+

Boolean indicating whether to print information.

+
+
logger
+

Either ‘bar’ (progress bar) or None or any Proglog logger.

+
+
+
Returns:
+
names_list
+

A list of all the files generated.

+
+
+
+

Notes

+

The resulting image sequence can be read using e.g. the class +ImageSequenceClip.

+
+ +
+
+write_videofile(self, filename, fps=None, codec=None, bitrate=None, audio=True, audio_fps=44100, preset='medium', audio_nbytes=4, audio_codec=None, audio_bitrate=None, audio_bufsize=2000, temp_audiofile=None, rewrite_audio=True, remove_temp=True, write_logfile=False, verbose=True, threads=None, ffmpeg_params=None, logger='bar')
+

Write the clip to a videofile.

+ +++ + + + +
Parameters:
+
filename
+

Name of the video file to write in. +The extension must correspond to the “codec” used (see below), +or simply be ‘.avi’ (which will work with any codec).

+
+
fps
+

Number of frames per second in the resulting video file. If None is +provided, and the clip has an fps attribute, this fps will be used.

+
+
codec
+

Codec to use for image encoding. Can be any codec supported by ffmpeg. If the filename has extension ‘.mp4’, ‘.ogv’, or ‘.webm’, the codec will be set accordingly, but you can still set it if you don’t like the default. For other extensions, the output filename must be set accordingly.

+

Some examples of codecs are:

+

'libx264' (default codec for file extension .mp4) +makes well-compressed videos (quality tunable using ‘bitrate’).

+

'mpeg4' (other codec for extension .mp4) can be an alternative +to 'libx264', and produces higher quality videos by default.

+

'rawvideo' (use file extension .avi) will produce +a video of perfect quality, of possibly very huge size.

+

'png' (use file extension .avi) will produce a video of perfect quality, of smaller size than with rawvideo.

+

'libvorbis' (use file extension .ogv) is a nice video format, which is completely free/open source. However, not everyone has the codecs installed by default on their machine.

+

'libvpx' (use file extension .webm) is a tiny video format well suited for web videos (with HTML5). Open source.

+
+
audio
+

Either True, False, or a file name. +If True and the clip has an audio clip attached, this +audio clip will be incorporated as a soundtrack in the movie. +If audio is the name of an audio file, this audio file +will be incorporated as a soundtrack in the movie.

+
+
audio_fps
+

frame rate to use when generating the sound.

+
+
temp_audiofile
+

the name of the temporary audiofile to be generated and incorporated in the movie, if any.

+
+
audio_codec
+

Which audio codec should be used. Examples are ‘libmp3lame’ for ‘.mp3’, ‘libvorbis’ for ‘ogg’, ‘libfdk_aac’ for ‘m4a’, ‘pcm_s16le’ for 16-bit wav and ‘pcm_s32le’ for 32-bit wav. Default is ‘libmp3lame’, unless the video extension is ‘ogv’ or ‘webm’, in which case the default is ‘libvorbis’.

+
+
audio_bitrate
+

Audio bitrate, given as a string like ‘50k’, ‘500k’, ‘3000k’. Will determine the size/quality of audio in the output file. Note that it is mainly an indicative goal; the actual bitrate in the final file won’t necessarily match it.

+
+
preset
+

Sets the time that FFMPEG will spend optimizing the compression. +Choices are: ultrafast, superfast, veryfast, faster, fast, medium, +slow, slower, veryslow, placebo. Note that this does not impact +the quality of the video, only the size of the video file. So +choose ultrafast when you are in a hurry and file size does not +matter.

+
+
threads
+

Number of threads to use for ffmpeg. Can speed up the writing of +the video on multicore computers.

+
+
ffmpeg_params
+

Any additional ffmpeg parameters you would like to pass, as a list +of terms, like [‘-option1’, ‘value1’, ‘-option2’, ‘value2’].

+
+
write_logfile
+

If true, will write log files for the audio and the video. +These will be files ending with ‘.log’ with the name of the +output file in them.

+
+
logger
+

Either “bar” for progress bar or None or any Proglog logger.

+
+
verbose (deprecated, kept for compatibility)
+

Formerly used for toggling messages on/off. Use logger=None now.

+
+
+
+

Examples

+
>>> from moviepy.editor import VideoFileClip
+>>> clip = VideoFileClip("myvideo.mp4").subclip(100,120)
+>>> clip.write_videofile("my_new_video.mp4")
+>>> clip.close()
+
+
+
+ +
+ +
+
+moviepy.video.tools.subtitles.file_to_subtitles(filename)[source]
+

Converts an .srt file into subtitles.

+

The returned list is of the form [((ta,tb),'some text'),...] +and can be fed to SubtitlesClip.

+

Only works for ‘.srt’ format for the moment.
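A sketch of the expected shape of the result (file name and timings are illustrative):
>>> from moviepy.video.tools.subtitles import file_to_subtitles
+>>> subs = file_to_subtitles("subtitles.srt")
+>>> subs[0]  # e.g. ((0.0, 4.0), 'First subtitle text')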

+
+ +
+
+

Tracking

+
+
+ + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/search.html b/v1.0.3/search.html new file mode 100644 index 000000000..5f4d61676 --- /dev/null +++ b/v1.0.3/search.html @@ -0,0 +1,228 @@ + + + + + + + + + + Search — MoviePy documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
    + +
  • »
  • + +
  • Search
  • + + +
  • + +
  • + +
+ + +
+
+
+
+ + + + +
+ +
+ +
+ +
+
+ +
+ +
+

+ © Copyright 2017, Zulko. + +

+
+ + + + Built with Sphinx using a + + theme + + provided by Read the Docs. + +
+
+
+ +
+ +
+ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v1.0.3/searchindex.js b/v1.0.3/searchindex.js new file mode 100644 index 000000000..272773fad --- /dev/null +++ b/v1.0.3/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({docnames:["FAQ","advanced_tools/advanced_tools","docker","examples/compo_from_image","examples/dancing_knights","examples/example_with_sound","examples/examples","examples/headblur","examples/logo","examples/masked_credits","examples/moving_letters","examples/painting_effect","examples/quick_recipes","examples/several_characters","examples/star_worms","examples/the_end","examples/ukulele_concerto","gallery","getting_started/audioclips","getting_started/compositing","getting_started/effects","getting_started/efficient_moviepy","getting_started/getting_started","getting_started/quick_presentation","getting_started/videoclips","getting_started/working_with_matplotlib","index","install","opencv_instructions","ref/AudioClip","ref/Clip","ref/VideoClip/VideoClip","ref/audiofx","ref/audiofx/moviepy.audio.fx.all.audio_fadein","ref/audiofx/moviepy.audio.fx.all.audio_fadeout","ref/audiofx/moviepy.audio.fx.all.audio_loop","ref/audiofx/moviepy.audio.fx.all.audio_normalize","ref/audiofx/moviepy.audio.fx.all.volumex","ref/audiotools","ref/code_origanization","ref/decorators","ref/ffmpeg","ref/ref","ref/videofx","ref/videofx/moviepy.video.fx.all.accel_decel","ref/videofx/moviepy.video.fx.all.blackwhite","ref/videofx/moviepy.video.fx.all.blink","ref/videofx/moviepy.video.fx.all.colorx","ref/videofx/moviepy.video.fx.all.crop","ref/videofx/moviepy.video.fx.all.even_size","ref/videofx/moviepy.video.fx.all.fadein","ref/videofx/moviepy.video.fx.all.fadeout","ref/videofx/moviepy.video.fx.all.freeze","ref/videofx/moviepy.video.fx.all.freeze_region","ref/videofx/moviepy.video.fx.all.gamma_corr","ref/videofx/moviepy.video.fx.all.headblur","ref/videofx/moviepy.video.fx.all.invert_colors","ref/videofx/moviepy.video.fx.all.loop","ref/videofx/moviepy.video.fx.all.lum_contrast","ref/videofx/moviepy.video.fx.all.make_loopable","ref/videofx/moviepy.video.fx.all.margin","ref/videofx/moviepy.video.fx.all.mask_and","ref/videofx/moviepy.video.fx.all.mask_color","ref/videofx/moviepy.video.fx.all.mask_or","ref/videofx/moviepy.video.fx.all.mirror_x","ref/videofx/moviepy.video.fx.all.mirror_y","ref/videofx/moviepy.video.fx.all.painting","ref/videofx/moviepy.video.fx.all.resize","ref/videofx/moviepy.video.fx.all.rotate","ref/videofx/moviepy.video.fx.all.scroll","ref/videofx/moviepy.video.fx.all.speedx","ref/videofx/moviepy.video.fx.all.supersample","ref/videofx/moviepy.video.fx.all.time_mirror","ref/videofx/moviepy.video.fx.all.time_symmetrize","ref/videotools"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.todo":1,"sphinx.ext.viewcode":1,sphinx:55},filenames:["FAQ.rst","advanced_tools/advanced_tools.rst","docker.rst","examples/compo_from_image.rst","examples/dancing_knights.rst","examples/example_with_sound.rst","examples/examples.rst","examples/headblur.rst","examples/logo.rst","examples/masked_credits.rst","examples/moving_letters.rst","examples/painting_effect.rst","examples/quick_recipes.rst","examples/several_characters.rst","examples/star_worms.rst","examples/the_end.rst","examples/ukulele_concerto.rst","gallery.rst","getting_started/audioclips.rst","getting_started/compositing.rst","getting_started/effects.rst","g
etting_started/efficient_moviepy.rst","getting_started/getting_started.rst","getting_started/quick_presentation.rst","getting_started/videoclips.rst","getting_started/working_with_matplotlib.rst","index.rst","install.rst","opencv_instructions.rst","ref/AudioClip.rst","ref/Clip.rst","ref/VideoClip/VideoClip.rst","ref/audiofx.rst","ref/audiofx/moviepy.audio.fx.all.audio_fadein.rst","ref/audiofx/moviepy.audio.fx.all.audio_fadeout.rst","ref/audiofx/moviepy.audio.fx.all.audio_loop.rst","ref/audiofx/moviepy.audio.fx.all.audio_normalize.rst","ref/audiofx/moviepy.audio.fx.all.volumex.rst","ref/audiotools.rst","ref/code_origanization.rst","ref/decorators.rst","ref/ffmpeg.rst","ref/ref.rst","ref/videofx.rst","ref/videofx/moviepy.video.fx.all.accel_decel.rst","ref/videofx/moviepy.video.fx.all.blackwhite.rst","ref/videofx/moviepy.video.fx.all.blink.rst","ref/videofx/moviepy.video.fx.all.colorx.rst","ref/videofx/moviepy.video.fx.all.crop.rst","ref/videofx/moviepy.video.fx.all.even_size.rst","ref/videofx/moviepy.video.fx.all.fadein.rst","ref/videofx/moviepy.video.fx.all.fadeout.rst","ref/videofx/moviepy.video.fx.all.freeze.rst","ref/videofx/moviepy.video.fx.all.freeze_region.rst","ref/videofx/moviepy.video.fx.all.gamma_corr.rst","ref/videofx/moviepy.video.fx.all.headblur.rst","ref/videofx/moviepy.video.fx.all.invert_colors.rst","ref/videofx/moviepy.video.fx.all.loop.rst","ref/videofx/moviepy.video.fx.all.lum_contrast.rst","ref/videofx/moviepy.video.fx.all.make_loopable.rst","ref/videofx/moviepy.video.fx.all.margin.rst","ref/videofx/moviepy.video.fx.all.mask_and.rst","ref/videofx/moviepy.video.fx.all.mask_color.rst","ref/videofx/moviepy.video.fx.all.mask_or.rst","ref/videofx/moviepy.video.fx.all.mirror_x.rst","ref/videofx/moviepy.video.fx.all.mirror_y.rst","ref/videofx/moviepy.video.fx.all.painting.rst","ref/videofx/moviepy.video.fx.all.resize.rst","ref/videofx/moviepy.video.fx.all.rotate.rst","ref/videofx/moviepy.video.fx.all.scroll.rst","ref/videofx/moviepy.video.fx.all.speedx.rst","ref/videofx/moviepy.video.fx.all.supersample.rst","ref/videofx/moviepy.video.fx.all.time_mirror.rst","ref/videofx/moviepy.video.fx.all.time_symmetrize.rst","ref/videotools.rst"],objects:{"moviepy.Clip":{Clip:[30,0,1,""]},"moviepy.Clip.Clip":{close:[30,1,1,""],copy:[30,1,1,""],cutout:[30,1,1,""],fl:[30,1,1,""],fl_time:[30,1,1,""],fx:[30,1,1,""],get_frame:[30,1,1,""],is_playing:[30,1,1,""],iter_frames:[30,1,1,""],set_duration:[30,1,1,""],set_end:[30,1,1,""],set_fps:[30,1,1,""],set_ismask:[30,1,1,""],set_make_frame:[30,1,1,""],set_memoize:[30,1,1,""],set_start:[30,1,1,""],subclip:[30,1,1,""]},"moviepy.audio.AudioClip":{AudioClip:[29,0,1,""],CompositeAudioClip:[29,0,1,""]},"moviepy.audio.AudioClip.AudioClip":{close:[29,1,1,""],copy:[29,1,1,""],cutout:[29,1,1,""],fl:[29,1,1,""],fl_time:[29,1,1,""],fx:[29,1,1,""],get_frame:[29,1,1,""],is_playing:[29,1,1,""],iter_chunks:[29,1,1,""],iter_frames:[29,1,1,""],set_duration:[29,1,1,""],set_end:[29,1,1,""],set_fps:[29,1,1,""],set_ismask:[29,1,1,""],set_make_frame:[29,1,1,""],set_memoize:[29,1,1,""],set_start:[29,1,1,""],subclip:[29,1,1,""],to_audiofile:[29,1,1,""],to_soundarray:[29,1,1,""],write_audiofile:[29,1,1,""]},"moviepy.audio.AudioClip.CompositeAudioClip":{close:[29,1,1,""],copy:[29,1,1,""],cutout:[29,1,1,""],fl:[29,1,1,""],fl_time:[29,1,1,""],fx:[29,1,1,""],get_frame:[29,1,1,""],is_playing:[29,1,1,""],iter_chunks:[29,1,1,""],iter_frames:[29,1,1,""],set_duration:[29,1,1,""],set_end:[29,1,1,""],set_fps:[29,1,1,""],set_ismask:[29,1,1,""],set_make_frame:[29,1,1,""],set_memoize:[29,1,1
,""],set_start:[29,1,1,""],subclip:[29,1,1,""],to_audiofile:[29,1,1,""],to_soundarray:[29,1,1,""],write_audiofile:[29,1,1,""]},"moviepy.audio.fx.all":{audio_fadein:[33,2,1,""],audio_fadeout:[34,2,1,""],audio_loop:[35,2,1,""],audio_normalize:[36,2,1,""],volumex:[37,2,1,""]},"moviepy.audio.io.AudioFileClip":{AudioFileClip:[29,0,1,""]},"moviepy.audio.io.AudioFileClip.AudioFileClip":{close:[29,1,1,""],copy:[29,1,1,""],coreader:[29,1,1,""],cutout:[29,1,1,""],fl:[29,1,1,""],fl_time:[29,1,1,""],fx:[29,1,1,""],get_frame:[29,1,1,""],is_playing:[29,1,1,""],iter_chunks:[29,1,1,""],iter_frames:[29,1,1,""],set_duration:[29,1,1,""],set_end:[29,1,1,""],set_fps:[29,1,1,""],set_ismask:[29,1,1,""],set_make_frame:[29,1,1,""],set_memoize:[29,1,1,""],set_start:[29,1,1,""],subclip:[29,1,1,""],to_audiofile:[29,1,1,""],to_soundarray:[29,1,1,""],write_audiofile:[29,1,1,""]},"moviepy.decorators":{add_mask_if_none:[40,2,1,""],apply_to_audio:[40,2,1,""],apply_to_mask:[40,2,1,""],audio_video_fx:[40,2,1,""],convert_masks_to_RGB:[40,2,1,""],convert_to_seconds:[40,2,1,""],outplace:[40,2,1,""],preprocess_args:[40,2,1,""],requires_duration:[40,2,1,""],use_clip_fps_by_default:[40,2,1,""]},"moviepy.video.VideoClip":{ColorClip:[31,0,1,""],ImageClip:[31,0,1,""],TextClip:[31,0,1,""],VideoClip:[31,0,1,""]},"moviepy.video.VideoClip.ColorClip":{add_mask:[31,1,1,""],afx:[31,1,1,""],blit_on:[31,1,1,""],close:[31,1,1,""],copy:[31,1,1,""],cutout:[31,1,1,""],fl:[31,1,1,""],fl_image:[31,1,1,""],fl_time:[31,1,1,""],fx:[31,1,1,""],get_frame:[31,1,1,""],is_playing:[31,1,1,""],iter_frames:[31,1,1,""],on_color:[31,1,1,""],save_frame:[31,1,1,""],set_audio:[31,1,1,""],set_duration:[31,1,1,""],set_end:[31,1,1,""],set_fps:[31,1,1,""],set_ismask:[31,1,1,""],set_make_frame:[31,1,1,""],set_mask:[31,1,1,""],set_memoize:[31,1,1,""],set_opacity:[31,1,1,""],set_pos:[31,1,1,""],set_position:[31,1,1,""],set_start:[31,1,1,""],subclip:[31,1,1,""],subfx:[31,1,1,""],to_ImageClip:[31,1,1,""],to_RGB:[31,1,1,""],to_gif:[31,1,1,""],to_images_sequence:[31,1,1,""],to_mask:[31,1,1,""],to_videofile:[31,1,1,""],without_audio:[31,1,1,""],write_gif:[31,1,1,""],write_images_sequence:[31,1,1,""],write_videofile:[31,1,1,""]},"moviepy.video.VideoClip.ImageClip":{add_mask:[31,1,1,""],afx:[31,1,1,""],blit_on:[31,1,1,""],close:[31,1,1,""],copy:[31,1,1,""],cutout:[31,1,1,""],fl:[31,1,1,""],fl_image:[31,1,1,""],fl_time:[31,1,1,""],fx:[31,1,1,""],get_frame:[31,1,1,""],is_playing:[31,1,1,""],iter_frames:[31,1,1,""],on_color:[31,1,1,""],save_frame:[31,1,1,""],set_audio:[31,1,1,""],set_duration:[31,1,1,""],set_end:[31,1,1,""],set_fps:[31,1,1,""],set_ismask:[31,1,1,""],set_make_frame:[31,1,1,""],set_mask:[31,1,1,""],set_memoize:[31,1,1,""],set_opacity:[31,1,1,""],set_pos:[31,1,1,""],set_position:[31,1,1,""],set_start:[31,1,1,""],subclip:[31,1,1,""],subfx:[31,1,1,""],to_ImageClip:[31,1,1,""],to_RGB:[31,1,1,""],to_gif:[31,1,1,""],to_images_sequence:[31,1,1,""],to_mask:[31,1,1,""],to_videofile:[31,1,1,""],without_audio:[31,1,1,""],write_gif:[31,1,1,""],write_images_sequence:[31,1,1,""],write_videofile:[31,1,1,""]},"moviepy.video.VideoClip.TextClip":{add_mask:[31,1,1,""],afx:[31,1,1,""],blit_on:[31,1,1,""],close:[31,1,1,""],copy:[31,1,1,""],cutout:[31,1,1,""],fl:[31,1,1,""],fl_image:[31,1,1,""],fl_time:[31,1,1,""],fx:[31,1,1,""],get_frame:[31,1,1,""],is_playing:[31,1,1,""],iter_frames:[31,1,1,""],list:[31,4,1,""],on_color:[31,1,1,""],save_frame:[31,1,1,""],search:[31,4,1,""],set_audio:[31,1,1,""],set_duration:[31,1,1,""],set_end:[31,1,1,""],set_fps:[31,1,1,""],set_ismask:[31,1,1,""],set_
make_frame:[31,1,1,""],set_mask:[31,1,1,""],set_memoize:[31,1,1,""],set_opacity:[31,1,1,""],set_pos:[31,1,1,""],set_position:[31,1,1,""],set_start:[31,1,1,""],subclip:[31,1,1,""],subfx:[31,1,1,""],to_ImageClip:[31,1,1,""],to_RGB:[31,1,1,""],to_gif:[31,1,1,""],to_images_sequence:[31,1,1,""],to_mask:[31,1,1,""],to_videofile:[31,1,1,""],without_audio:[31,1,1,""],write_gif:[31,1,1,""],write_images_sequence:[31,1,1,""],write_videofile:[31,1,1,""]},"moviepy.video.VideoClip.VideoClip":{add_mask:[31,1,1,""],afx:[31,1,1,""],blit_on:[31,1,1,""],close:[31,1,1,""],copy:[31,1,1,""],cutout:[31,1,1,""],fl:[31,1,1,""],fl_image:[31,1,1,""],fl_time:[31,1,1,""],fx:[31,1,1,""],get_frame:[31,1,1,""],is_playing:[31,1,1,""],iter_frames:[31,1,1,""],on_color:[31,1,1,""],save_frame:[31,1,1,""],set_audio:[31,1,1,""],set_duration:[31,1,1,""],set_end:[31,1,1,""],set_fps:[31,1,1,""],set_ismask:[31,1,1,""],set_make_frame:[31,1,1,""],set_mask:[31,1,1,""],set_memoize:[31,1,1,""],set_opacity:[31,1,1,""],set_pos:[31,1,1,""],set_position:[31,1,1,""],set_start:[31,1,1,""],subclip:[31,1,1,""],subfx:[31,1,1,""],to_ImageClip:[31,1,1,""],to_RGB:[31,1,1,""],to_gif:[31,1,1,""],to_images_sequence:[31,1,1,""],to_mask:[31,1,1,""],to_videofile:[31,1,1,""],without_audio:[31,1,1,""],write_gif:[31,1,1,""],write_images_sequence:[31,1,1,""],write_videofile:[31,1,1,""]},"moviepy.video.compositing.CompositeVideoClip":{CompositeVideoClip:[31,0,1,""]},"moviepy.video.compositing.CompositeVideoClip.CompositeVideoClip":{add_mask:[31,1,1,""],afx:[31,1,1,""],blit_on:[31,1,1,""],close:[31,1,1,""],copy:[31,1,1,""],cutout:[31,1,1,""],fl:[31,1,1,""],fl_image:[31,1,1,""],fl_time:[31,1,1,""],fx:[31,1,1,""],get_frame:[31,1,1,""],is_playing:[31,1,1,""],iter_frames:[31,1,1,""],on_color:[31,1,1,""],playing_clips:[31,1,1,""],save_frame:[31,1,1,""],set_audio:[31,1,1,""],set_duration:[31,1,1,""],set_end:[31,1,1,""],set_fps:[31,1,1,""],set_ismask:[31,1,1,""],set_make_frame:[31,1,1,""],set_mask:[31,1,1,""],set_memoize:[31,1,1,""],set_opacity:[31,1,1,""],set_pos:[31,1,1,""],set_position:[31,1,1,""],set_start:[31,1,1,""],subclip:[31,1,1,""],subfx:[31,1,1,""],to_ImageClip:[31,1,1,""],to_RGB:[31,1,1,""],to_gif:[31,1,1,""],to_images_sequence:[31,1,1,""],to_mask:[31,1,1,""],to_videofile:[31,1,1,""],without_audio:[31,1,1,""],write_gif:[31,1,1,""],write_images_sequence:[31,1,1,""],write_videofile:[31,1,1,""]},"moviepy.video.fx.all":{accel_decel:[44,2,1,""],blackwhite:[45,2,1,""],blink:[46,2,1,""],colorx:[47,2,1,""],crop:[48,2,1,""],even_size:[49,2,1,""],fadein:[50,2,1,""],fadeout:[51,2,1,""],freeze:[52,2,1,""],freeze_region:[53,2,1,""],gamma_corr:[54,2,1,""],headblur:[55,2,1,""],invert_colors:[56,2,1,""],loop:[57,2,1,""],lum_contrast:[58,2,1,""],make_loopable:[59,2,1,""],margin:[60,2,1,""],mask_and:[61,2,1,""],mask_color:[62,2,1,""],mask_or:[63,2,1,""],mirror_x:[64,2,1,""],mirror_y:[65,2,1,""],painting:[66,2,1,""],resize:[67,2,1,""],rotate:[68,2,1,""],scroll:[69,2,1,""],speedx:[70,2,1,""],supersample:[71,2,1,""],time_mirror:[72,2,1,""],time_symmetrize:[73,2,1,""]},"moviepy.video.io":{ffmpeg_tools:[41,3,0,"-"]},"moviepy.video.io.VideoFileClip":{VideoFileClip:[31,0,1,""]},"moviepy.video.io.VideoFileClip.VideoFileClip":{add_mask:[31,1,1,""],afx:[31,1,1,""],blit_on:[31,1,1,""],close:[31,1,1,""],copy:[31,1,1,""],cutout:[31,1,1,""],fl:[31,1,1,""],fl_image:[31,1,1,""],fl_time:[31,1,1,""],fx:[31,1,1,""],get_frame:[31,1,1,""],is_playing:[31,1,1,""],iter_frames:[31,1,1,""],on_color:[31,1,1,""],save_frame:[31,1,1,""],set_audio:[31,1,1,""],set_duration:[31,1,1,""],set_end:[31,1,1,""],
set_fps:[31,1,1,""],set_ismask:[31,1,1,""],set_make_frame:[31,1,1,""],set_mask:[31,1,1,""],set_memoize:[31,1,1,""],set_opacity:[31,1,1,""],set_pos:[31,1,1,""],set_position:[31,1,1,""],set_start:[31,1,1,""],subclip:[31,1,1,""],subfx:[31,1,1,""],to_ImageClip:[31,1,1,""],to_RGB:[31,1,1,""],to_gif:[31,1,1,""],to_images_sequence:[31,1,1,""],to_mask:[31,1,1,""],to_videofile:[31,1,1,""],without_audio:[31,1,1,""],write_gif:[31,1,1,""],write_images_sequence:[31,1,1,""],write_videofile:[31,1,1,""]},"moviepy.video.io.ffmpeg_tools":{ffmpeg_extract_audio:[41,2,1,""],ffmpeg_extract_subclip:[41,2,1,""],ffmpeg_merge_video_audio:[41,2,1,""],ffmpeg_movie_from_frames:[41,2,1,""],ffmpeg_resize:[41,2,1,""]},"moviepy.video.tools":{credits:[74,3,0,"-"],drawing:[74,3,0,"-"],subtitles:[74,3,0,"-"]},"moviepy.video.tools.credits":{credits1:[74,2,1,""]},"moviepy.video.tools.drawing":{blit:[74,2,1,""],circle:[74,2,1,""],color_gradient:[74,2,1,""],color_split:[74,2,1,""]},"moviepy.video.tools.subtitles":{SubtitlesClip:[74,0,1,""],file_to_subtitles:[74,2,1,""]},"moviepy.video.tools.subtitles.SubtitlesClip":{add_mask:[74,1,1,""],afx:[74,1,1,""],blit_on:[74,1,1,""],close:[74,1,1,""],copy:[74,1,1,""],cutout:[74,1,1,""],fl:[74,1,1,""],fl_image:[74,1,1,""],fl_time:[74,1,1,""],fx:[74,1,1,""],get_frame:[74,1,1,""],in_subclip:[74,1,1,""],is_playing:[74,1,1,""],iter_frames:[74,1,1,""],on_color:[74,1,1,""],save_frame:[74,1,1,""],set_audio:[74,1,1,""],set_duration:[74,1,1,""],set_end:[74,1,1,""],set_fps:[74,1,1,""],set_ismask:[74,1,1,""],set_make_frame:[74,1,1,""],set_mask:[74,1,1,""],set_memoize:[74,1,1,""],set_opacity:[74,1,1,""],set_pos:[74,1,1,""],set_position:[74,1,1,""],set_start:[74,1,1,""],subclip:[74,1,1,""],subfx:[74,1,1,""],to_ImageClip:[74,1,1,""],to_RGB:[74,1,1,""],to_gif:[74,1,1,""],to_images_sequence:[74,1,1,""],to_mask:[74,1,1,""],to_videofile:[74,1,1,""],without_audio:[74,1,1,""],write_gif:[74,1,1,""],write_images_sequence:[74,1,1,""],write_videofile:[74,1,1,""]},moviepy:{decorators:[40,3,0,"-"]}},objnames:{"0":["py","class","Python class"],"1":["py","method","Python method"],"2":["py","function","Python function"],"3":["py","module","Python module"],"4":["py","staticmethod","Python static 
method"]},objtypes:{"0":"py:class","1":"py:method","2":"py:function","3":"py:module","4":"py:staticmethod"},terms:{"02f":4,"03d":[31,74],"04d":[31,74],"0db":36,"1000k":4,"10px":19,"10s":[20,23],"15s":21,"15th":6,"16bit":29,"200px":48,"20s":[3,20],"3000k":[7,11,29,31,74],"30s":14,"32bit":29,"4000k":4,"440hz":29,"500k":[29,31,74],"50k":[29,31,74],"720x405":0,"8bit":29,"boolean":[29,30,31,74],"byte":29,"case":[0,19,20,21,23,24,27,30,31,32,40,55,62,74],"class":[19,20,21,24,29,30,39,42,74],"default":[0,19,21,24,29,30,31,39,50,51,64,65,74],"donz\u00e9":4,"export":[22,23,25],"final":[4,5,7,9,11,14,15,16,19,20,23,28,31,51,53,74],"float":[12,19,29,31,48,67,74],"function":[7,10,14,19,20,21,23,24,25,29,30,31,32,39,40,42,43,67,68,74],"gen\u00e8v":4,"import":[3,4,5,7,9,10,11,12,14,15,16,18,19,20,21,23,24,25,28,29,30,31,32,35,36,43,74],"int":[14,15,16,20,29,30,31,74],"long":[3,14,19,21],"mus\u00e9":4,"new":[16,19,20,23,28,29,30,31,39,41,43,48,60,62,67,74],"public":[3,14],"return":[3,10,12,14,20,24,25,29,30,31,33,34,35,36,37,40,48,55,56,57,61,62,63,67,70,72,73,74],"short":21,"si\u00e8cl":4,"static":31,"throw":28,"true":[5,9,14,19,21,24,25,29,30,31,45,67,68,74],"try":[21,29],"universit\u00e9":4,"voil\u00e0":28,"while":[8,20,21,24,43,53],AND:[5,7,10,11,16],ARE:11,Age:4,And:[4,9,14,23,28],But:19,FOR:14,FPS:31,For:[17,19,20,21,23,24,27,29,30,31,43,50,51,74],GOING:10,HAS:[7,16],Has:31,INTO:14,NOT:[16,20,23],One:[14,52,68,74],THAT:10,THE:[4,5,7,10,11,14,16],That:[11,31],The:[5,6,11,16,19,20,23,24,25,26,27,28,29,30,31,32,39,42,43,55,56,61,63,67,70,72,74],Then:[7,15,21,24,39],There:[19,20,24,26],These:[0,21,22,29,30,31,40,74],USE:10,Use:[29,30,31,40,74],Useful:[29,30,31,74],WITH:[14,16],Will:[24,29,31,40,46,74],With:[17,21,52],__version__:28,abl:21,abort:21,about:[3,14,18,19,20,27],abov:[15,19,24,25,27,48],abrupt:44,abs:10,accel_decel:42,accept:[20,31,48,74],accord:6,accordingli:[31,74],account:[20,31],ach:31,achiev:[21,36],aclip1:19,aclip2:19,aclip3:19,acodec:41,act:21,action:26,actual:[5,8,15,21,31,52,74],adapt:21,add:[4,5,7,11,14,17,19,23,31,39,40,74],add_mask:[11,15,31,68,74],add_mask_if_non:40,adding:[17,21,24],addit:[29,31,74],advanc:[0,20,23,26,27,39,74],advantag:[21,25],advic:28,affect:[31,32],after:[7,11,14,19,29,31],afx:[20,31,32,35,36,74],ago:14,algorithm:[11,31],align:[14,24,31],all:[15,17,19,20,21,24,29,30,31,32,39,40,42,43,74],allow:[23,25,32,43,45],almost:[23,41],along:74,alpha:[24,31,74],alphabet:74,alphanumer:24,alreadi:[2,18,20,21,24,27,29,30,31,43,74],also:[0,2,9,14,17,18,19,20,21,23,24,26,27,29,30,31,39,40,62,74],altern:[18,31,74],although:23,alwai:[24,28],american:17,amiri:[10,11,15,16,24],analysi:[17,23],analyz:4,angl:[10,68],ani:[21,23,24,25,28,29,30,31,39,48,61,63,66,70,72,74],anim:[10,16,21,23,31,53,73,74],annot:14,annotated_clip:14,anoth:[14,21,23,24,29,30,31,50,51,74],anount:66,answer:[0,21],antialias:74,api:3,appear:[11,19,21,23,31,50,51],appli:[20,29,30,31,32,37,39,40,70,72,73,74],applic:21,applif:68,apply_to:[14,29,30,31,64,65,69,74],apply_to_audio:40,apply_to_mask:[40,67],approxim:[14,39],arbitrari:[29,30,48,74],area:[7,14],aren:27,arent:19,arg:[20,29,30,31,74],args1:20,args2:20,args3:20,argument:[20,21,29,31,74],armur:4,around:[7,17,21,24,60],arrai:[10,11,14,24,25,29,30,31,61,63,74],arriv:[10,33],ash:14,ask:[0,21,26],aspect:[3,20,31],assembl:[4,5,16],assemble_cut:17,assign:18,associ:[23,74],assum:[29,30,31,74],astyp:[12,14,74],atrribut:[29,30,31,74],attach:[24,31,32,74],attempt:17,attribut:[17,19,24,29,30,31,72,74],audio:[3,4,7,9,15,16,20,21,22,23,24,29,30,31,39,40,41,42,70,72,73,7
4],audio_bitr:[4,11,31,74],audio_buffers:31,audio_bufs:[31,74],audio_codec:[31,74],audio_fadein:[4,32,42],audio_fadeout:[4,32,42],audio_fp:[31,74],audio_loop:42,audio_nbyt:[31,74],audio_norm:42,audio_period:4,audio_video_fx:40,audioarrayclip:24,audioc:19,audioclip:[23,24,30,31,32,35,39,40,42,74],audiofil:[18,31,74],audiofileclip:[4,18,21,24,35,39,42],audiofp:[31,74],audiotrack:19,audrei:[5,11],auto:31,autocomplet:21,autogener:31,automag:31,automat:[4,17,18,19,20,21,23,24,27,29,30,31,39,52,55,67,70,73,74],autoplai:[21,25],autos:31,avail:[21,23,31],avconv:23,average_imag:12,average_test:12,avi:[5,7,9,10,11,14,15,16,18,20,24,31,74],avoid:21,awai:28,b_1:[29,30,31,74],b_2:[29,30,31,74],b_3:[29,30,31,74],b_i:[29,30,31,74],back:29,backend:27,background:[7,14,15,16,17,19,24,31,74],backward:[20,29,30,31,72,73,74],bad:3,badl:[9,15],bar:[12,29,31,41,74],base:[4,10,17,24,29,30,31,39,74],bash:2,basic:[17,26],beach:17,becaus:[5,7,24,32,43],becom:[15,21,25,56],been:[3,4,7,14,23,31,74],befor:[9,11,14,19,20,24,27,29,31,40,74],beforehand:74,begin:[15,17,31,50],behind:21,being:[21,29,30,31,62,74],belong:24,below:[24,25,29,30,31,74],best:[21,23,24],better:[20,21,23,24,27,42],between:[19,20,24,29,30,31,41,50,51,61,62,63,74],bg_color:[4,7,14,31],bicub:[31,68],big:[21,24],bigger:[19,31,74],biggest:19,bilinear:[29,30,31,68,74],binari:27,bind:[25,41],biphone3:5,bit:[24,29,31,74],bitrat:[4,7,24,29,31,41,74],blabla:[13,74],black:[4,8,11,15,16,20,24,31,39,45,50,51,56,66,68,74],black_whit:[20,23],blackwhit:42,blank:[14,74],blink:42,blit:[31,74],blit_on:[31,74],block:[21,24],blog:[17,24],blue:20,blur:[6,15,55,71,74],blurr:55,blurred_video:12,blurredchaplin:7,blurri:[7,74],bold:[10,14,15,24],border:[17,19,68,74],both:[20,37,39],bottom:[14,16,29,30,31,60,74],bound:29,boundari:48,branch:27,briefli:1,bright:47,browser:21,buffers:29,bug:[26,28],buggi:31,build:[2,24],build_exampl:28,build_new_python_support:28,burnt:14,cach:21,calcul:52,californian:17,call:[20,21,23,24,27,31,39,74],calo:3,camera:17,can:[1,2,4,11,16,17,18,19,20,21,23,24,25,26,27,28,29,30,31,32,37,39,40,43,47,50,51,52,53,57,59,61,62,63,67,68,70,74],canal:[31,74],cannot:23,capabl:17,caption:31,care:3,carri:[5,19,24],cascad:10,categori:20,cell:[21,25],center:[4,7,9,10,14,15,16,19,23,31,48,74],central:[23,24],centuri:[6,7],chain:[29,30,31,74],chang:[0,2,3,29,30,31,68,74],change_end:[29,30,31,74],channel:[20,29,45],chaplin:7,chaplin_txi:7,charact:6,charad:[5,11],charadephon:5,chariti:0,check:[21,28],cheer:17,choic:[31,74],choos:[19,24,29,31,53,74],chunk:29,chunk_dur:29,chunksiz:29,circa:21,circl:[15,24,55,74],circular:74,clean:21,clear:25,clearer:[20,29,30,31,74],clearli:39,click:21,clip1:19,clip2:[19,31],clip3:19,clip4:19,clip:[0,4,5,6,7,9,10,11,12,14,15,16,17,22,23,25,27,29,32,33,34,35,36,37,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74],clip_aft:11,clip_arrai:19,clip_befor:11,clip_blur:[7,12],clip_left:5,clip_right:5,clip_txt:14,clips_arrai:[4,19,39],close:[29,30,31,74],clsing:21,cmake:28,code:[2,4,5,9,10,11,14,16,17,24,26,28,42],codec:[0,5,10,11,16,24,29,31,74],col1:[5,14,15,74],col2:[5,14,15,74],col:[31,74],col_opac:[16,31,74],collect:[31,39],collector:21,color:[4,7,10,11,14,15,16,19,21,23,24,31,45,47,50,51,56,60,62,66,74],color_gradi:[14,74],color_split:[5,74],colorclip:[24,42],colorx:[9,20,42,43],com:4,combat:4,combin:[17,48],come:57,command:[2,27],comment:[7,74],common:[4,20,21,23,24,26],comp_clip:3,compar:[21,31,74],compat:[29,31,74],complain:31,complet:[1,31,74],complex:[3,14,19,21],complic:[14,23
,29,30],compo:19,compon:24,compos:[8,14,18,19,23,24,29,31,39],composecent:14,composit:[3,26,29,30,31,39,46,50,51,74],compositeaudioclip:[19,42],compositesoundclip:29,compositevideoclip:[3,4,5,9,10,11,14,15,16,21,23,39,42,74],compositionclip:19,compress:[31,74],compris:24,comput:[0,3,14,20,21,24,27,28,29,30,31,67,70,74],concat:19,concaten:[7,10,11,14,18,26],concatenate_audioclip:19,concatenate_videoclip:[4,7,10,11,14,19,39],config:39,config_default:[27,39],connect:2,consequ:15,conserv:[24,48,67],consid:[0,20,31,74],consol:[28,39],constant:20,constant_s:[31,74],construct:21,contain:[2,5,19,24,29,30,31,39,74],content:[17,29,30,31,74],context:[21,29,31],contour:[11,19,24,31,74],contrast:58,contrast_thr:58,control:23,conveni:20,convers:[29,31,74],convert:[7,23,24,27,29,31,39,40,74],convert_masks_to_rgb:40,convert_to_second:40,cool:10,cooltexteffect:10,coordin:[19,21,48,74],copi:[16,21,29,30,31,40,41,74],core:20,coread:[5,11,21,29],corner:[16,31,48,74],correct:[39,54,58],correspond:[20,31,74],cos:10,could:[3,23],coupl:[14,20,31,74],courier:31,cover:74,creat:[10,11,14,17,21,22,23,25,26,31,39,40,73,74],creation:[17,23],creativ:4,creator:17,credit:[4,6,23,42,69],creditfil:74,credits1:[9,74],credits_mountain:9,crew:31,crop:[4,5,14,16,42,43,49,74],crope:[17,48],cross:[50,51,59],crossfad:[50,51],crossfadein:[11,19],crossfadeout:11,crowd:17,crt_phosphor:45,current:[0,17,20,29,30,31,38,44,53,57,70,72,73,74],curtain:21,custom:[18,19,23,24],cut:[4,5,9,16,17,18,20,23,24,26,29,30,31,74],cute:23,cutout:[29,30,31,74],cv2:[27,28],cvc:[10,14],cvsec:11,d_off:46,d_on:46,dai:[14,17],damp:10,danc:6,dancing_knight:4,darken:[14,20,23],darker:9,dat:7,data:7,deal:[55,74],decid:19,declar:24,decor:[27,39,42],decreas:47,dedic:0,deep:14,def:[10,12,14,20,24,25],defin:[10,24,39,53,55,68,74],deg:68,degre:68,demonstr:17,depend:[7,20,21],deprec:[0,29,31,41,74],deriv:21,desatur:45,descript:[5,14,23],design:23,desir:70,desired_height:31,desired_width:31,detail:[23,27,29,31,39],detect:[23,27],determin:[29,31,44,74],develop:[0,17,23,27],didier:74,differ:[3,9,14,17,18,21,23,24,29,31,32,45,74],difficult:74,digit:[31,41,74],dimens:[0,24,31,49,74],direct:[60,74],directli:[17,21,23,32],directori:[2,28],dirti:21,disappear:[11,46,50,51],disc:74,displai:[20,21,24,31,46,74],dispos:[31,74],distanc:[24,62],distant:23,distort:3,divid:74,django:23,doc:[19,31,32,39],docker:26,dockerfil:2,doctor:4,doctorvoxoffici:4,document:[21,24,39,42],doe:[20,29,30,31,74],doesn:31,doing:[10,21],domain:[3,14],don:[0,15,19,20,21,24,27,31,74],done:[7,19,21,23],dot:10,doubl:[20,37],douceamb:16,down:[4,9,19,20,21,23,31,44,74],download:[0,4,26],downsiz:[0,16,19,24],draw:[5,11,14,15,23,24,42,60],drawback:21,drawn:31,dst:14,dtype:[12,29,30,31,74],dump:7,duplic:6,durand:74,durat:[4,5,12,14,15,20,24,25,29,30,31,33,34,35,40,50,51,52,57,59,61,63,72,74],dure:[20,27,35],each:[3,4,10,16,17,20,21,24,29,30,31,46,71,74],earli:21,earlier:21,eas:40,easi:[4,20,23,28,29,30,31,39,43,74],easier:21,easili:[7,39],east:31,edg:11,edit:[1,4,10,18,19,21,22,23,24,26,27,29,30,31,39,74],edited_left:4,edited_right:4,editor:[3,4,5,7,9,10,11,12,14,15,16,18,19,20,23,24,25,29,30,31,32,35,36,37,39,43,74],effect:[6,10,14,19,21,22,23,24,26,29,30,31,37,39,40,44,62,70,72,73,74],effect_1:20,effect_2:20,effect_3:20,effici:[22,23,24,39],either:[20,21,23,24,27,29,30,31,32,35,43,53,67,68,74],electron:17,elma_s0002:3,elma_s0003:3,els:[10,14,29,30,31,74],emb:[17,21,39],embed:25,empti:38,enabl:[21,31],encod:[29,31,74],end:[6,14,17,20,21,23,29,30,31,34,51,52,59,69,74],engag:17,enhanc:23,enough:[0,19,21],ensur:23
,entir:17,entranc:[11,29],entri:31,enumer:10,environ:27,equal:71,equival:[29,30,31,74],error:[21,40],escap:21,especi:21,essenti:27,estim:14,etc:[2,17,18,20,21,23,24,27,29,30,31,32,39,43,56,74],even:[0,21,49,74],even_s:42,eventu:21,everi:[17,29,30,31,74],everyon:[31,74],everyth:[4,10,17,21,22,23,24,27,32,39,43,74],everywher:39,exactli:[31,74],exampl:[4,10,16,17,19,21,22,24,26,29,30,31,35,36,37,39,67,74],except:[21,27],excerpt:4,exe:27,exec:2,execut:[27,74],exercis:14,exist:[4,23,29,30,31,73,74],exit:21,exot:24,expand:68,experi:21,experiment:74,explain:[9,14,15,16,17,22,23,24],explicit:[24,48],express:[29,30,31,74],extens:[24,29,31,74],extern:[39,60],extract:[4,24,29,30,31,41,74],ez_setup:27,f1_t:29,f2_t:29,f_t:29,face:[6,23],facebook:17,fact:[17,19,25,31,39,74],factor:[37,47,67,70],fade:[11,14,19,34,50,51,59],fadein:[4,11,42],fadeout:[4,11,42],fail:[24,27],fake_speech:17,fals:[3,4,9,14,15,16,21,24,29,30,31,41,68,74],fanci:23,faq:26,farawai:14,fast:[21,23,29,31,74],fast_bilinear:31,faster:[11,20,21,23,24,29,30,31,74],fastest:21,fault:31,featur:21,fed:74,feedback:27,fetch:21,few:[3,6,14,16,17,21,23,27,28],ffmeg:39,ffmpeg:[0,23,24,27,29,31,39,42,74],ffmpeg_binari:27,ffmpeg_extract_audio:41,ffmpeg_extract_subclip:41,ffmpeg_merge_video_audio:41,ffmpeg_movie_from_fram:41,ffmpeg_output:41,ffmpeg_param:[29,31,74],ffmpeg_res:41,ffmpeg_tool:41,field:[29,30,31,74],fig:25,file:[4,5,7,9,10,14,17,18,20,21,23,25,27,28,29,31,39,41,74],file_to_subtitl:74,filenam:[29,31,41,74],fill:[0,17,19,24,74],film:17,filter:[20,29,30,31,39,55,74],final_clip:[10,11,19],final_color:51,final_dur:[4,70],find:[3,4,11,12,17,23,24,31,39],find_audio_period:4,find_video_period:4,find_word:17,findobject:[3,10],fine:[0,4,23],finish:21,first:[7,9,19,20,21,23,24,27,29,30,31,33,74],fist:20,fit:[3,31],fix:[23,31],fl_im:14,fl_imag:[12,14,20,31,74],fl_mask:14,fl_time:[20,29,30,31,74],flashi:66,flashier:11,flask:23,flat:14,flatten:[31,74],flexibl:[19,23],flip:[64,65],flower:24,folder:[24,27,28,39,41],follow:[4,11,19,20,21,23,24,25,29,30,31,39,74],font:[4,7,10,11,14,15,16,23,24,31,74],fontsiz:[4,7,10,11,14,15,16,23,24,31,74],footag:16,forev:4,form:[19,23,24,25,29,74],format:[0,24,26,31,74],former:20,formerli:[31,74],formula:16,forum:[0,26],forward:[29,73],found:[4,17,28],four:10,fps:[0,4,5,7,10,11,12,14,16,21,24,25,29,30,31,40,41,74],fps_sourc:31,fraction:74,frame:[6,9,17,20,21,23,24,25,29,30,31,41,52,55,60,71,74],frame_for_time_t:25,frame_region:20,free:[17,31,74],freeimag:[31,74],freez:[6,42,53],freeze_at_end:43,freeze_dur:52,freeze_region:42,frequenc:29,frequent:20,friendli:31,from:[0,3,4,5,7,9,10,11,12,14,15,16,18,19,20,21,23,24,25,29,30,31,32,35,36,41,43,50,57,74],fromalpha:31,frontier:4,full:24,fulli:[24,31],fun:[29,30,31,40,74],func:[29,30,31,74],funcpo:10,fundament:24,fundat:0,further:22,futur:[23,32,43],fuzz:[31,74],galaxi:14,galleri:[22,26],game:17,gamma:54,gamma_corr:42,gap:74,garbag:21,gather:21,gaussian_filt:12,gener:[3,7,9,14,17,19,20,21,23,24,29,30,31,39,74],georgia:74,get:[2,4,6,7,18,21,24,26,29,30,31,42,43,74],get_fram:[15,18,20,24,29,30,31,74],get_npimag:24,gif:[21,23,25,26,27,31,57,73,74],gimp:9,github:[0,4,17,26,27],give:[7,17,66],given:[20,24,25,29,31,35,47,62,74],gizeh:[17,23,24],goal:[23,29,31,74],goe:[14,17,55,74],going:[23,31,74],good:[0,17,19,20],goodi:23,grad:[14,74],grad_width:[5,74],gradient:[8,14,23,74],gradient_width:74,gradmask:14,gradual:74,grandfath:14,graphic:[17,23,24,39],green:[20,56,74],grei:7,grey20:7,grey70:7,greyscal:[24,31,74],grsm_0005:3,guess:[20,24],hack:42,had:14,half:37,hand:[4,10,11,21,27],ha
ndl:18,happen:[17,20],happi:24,has:[3,4,7,15,19,21,23,24,29,30,31,40,60,61,63,74],has_constant_s:31,has_mask:31,have:[0,4,7,11,14,16,17,18,19,20,21,22,23,24,27,28,29,31,39,62,72,74],head:[7,55],headblur:[7,42],heavi:14,hei:7,height:[14,19,20,24,25,31,48,66,67,74],heigth:[31,67],hello:24,help:[0,1,21,26,31,39],helper:[10,39],here:[3,4,5,6,7,8,9,10,11,14,17,19,21,23,24,25,26,28],hesit:0,hidden:6,hide:[19,23],high:[23,29,30,31,74],higher:[17,31,74],highest:31,highlight:17,histor:4,holidai:23,horizont:[19,31,64,69,74],host:26,hour:[20,24,29,30,31,74],hous:17,how:[3,4,14,17,18,22,24,39,44],howev:[21,31,74],html5:[21,31,39,74],html:31,http:[4,31],huge:[31,74],hungri:20,hurri:[31,74],hxwx3:24,hxwxn:[29,30,31,74],hypothet:4,ideal:57,iff:[29,30,31,74],illustr:10,im1:74,im2:74,im_freez:11,imag:[3,11,12,14,17,20,21,23,25,27,29,30,31,55,66,74],image_file1:24,image_func:[31,74],imageclip:[3,5,9,10,12,14,20,42,74],imageio:[23,27,31,74],imagemagick:[23,24,27,31,39,41,74],imagemagick_binari:27,imagemagick_vers:27,images_list:24,imagesequenceclip:[31,74],imagin:[4,10],img:[14,31],immedi:[20,21],impact:[24,29,31,74],implement:[20,21,23,39,40],impli:20,implicit:31,implicitli:[21,29],importantli:21,imposs:24,improv:26,in_subclip:74,inc:0,includ:[26,29,30,31,74],incorpor:[31,74],increas:47,indefinit:[21,57,59],index:[31,74],indic:[19,23,24,29,31,48,53,70,74],inf:44,infinit:[20,24,29,30,31,57,74],infom:29,inform:[24,31,74],initi:[21,39,50],initial_color:50,inkscap:3,inplac:20,inputfil:41,insert:[23,26],insid:[21,25],inspir:17,instal:[0,2,4,11,21,24,26,31,39,66,74],install_c_exampl:28,install_python_exampl:28,instanc:[0,7,17,19,20,21,23,24,27,29,30,31,39,74],instead:[1,4,17,20,21,29,31,53,57,70,74],instruct:24,int16:14,integ:24,intend:27,intens:[21,29,30,31,55,74],interact:21,interest:[10,17,18,24],interfac:[23,39],interlin:[4,31],intern:[29,31],internet:28,interv:71,introductori:17,intuit:23,invers:[14,56],invert:20,invert_color:42,invert_green_blu:20,involv:[23,28],ipython:[4,21,23,26,39],ipython_displai:[24,25],is_plai:[29,30,31,74],ismask:[5,9,14,24,29,30,31,74],isn:[2,31,74],istanc:31,ital:7,iter:[24,29,30,31,74],iter_chunk:29,iter_fram:[12,29,30,31,74],iterfram:[29,30,31,74],its:[3,7,19,24,27,28,29,30,31,39,59,64,65,72,74],itself:39,jaquet:4,jean:74,job:74,join:14,jpeg:[21,24,31,74],jpg:14,just:[3,14,16,18,21,23,24,27,28,29,31,37,48],keep:[14,17,19,20,24,27,29,30,31,74],keep_dur:[29,30,31,74],kept:[29,31,74],kern:[4,10,24,31],keyword:[20,21],kind:[32,43,66],knight:4,know:[17,32],known:[0,19,21],kwarg:[29,30,31,74],label:[14,31],lambda:[9,10,14,15,16,19,20,29,30,31,67,74],lame:16,land:14,larg:[15,19,21,43],larger:[19,21,32,43],last:[17,20,21,23,29,30,31,74],later:[9,21,28],latex:14,launch:40,lavign:17,layer:[24,31,74],learn:23,least:27,leav:74,left:[5,19,31,48,60,74],len:10,length:18,less:[15,20,31,74],let:[14,15,17,23],letter:[6,24,31],level:[31,74],libfdk_aac:[31,74],libmp3lam:[31,74],librairi:17,librari:[17,21,23,24,25,27,31,39,74],librelist:[0,26],libvorbi:[31,74],libvpx:[31,74],libx264:[0,16,24,31,74],licenc:[4,26,39],like:[0,3,6,11,17,19,20,21,23,24,26,27,29,30,31,32,39,43,48,67,74],limit:74,line:[3,4,7,16,17,21,23,28,29,30,31,66,74],linear:[17,19,26,74],link:24,linspac:25,linux:[2,26,27,28],list:[3,7,10,24,26,29,31,74],littl:[14,31,42,74],live:[14,23],lky3ek9vptg:4,loacat:3,load:[3,4,5,7,9,20,21,23,24,29,32,37,39,43],load_imag:24,locat:[2,10,31,42,74],lock:21,log:[29,31,74],logfil:29,logger:[12,29,30,31,41,74],logic:[61,63],logo:6,longer:[21,24],look:[9,11,14,17,19,21,22,24,27,31,74],loop:[4,20,21,24,
25,31,35,42,59,73,74],lost:20,lot:[3,11,21],louder:17,lower:[0,21,23,48],lum:58,lum_contrast:42,luminos:[39,58],m4a:[31,74],mac:[2,26,27],machin:[23,31,74],made:[9,14,24,29,31,74],magick:27,mai:[20,23,27,28,29,31,42],mail:26,main:[5,24,39],main_clip:5,mainli:[29,31,74],maintain:[45,68],make:[4,5,9,11,14,16,17,19,20,21,23,24,28,29,31,41,43,45,46,49,50,51,59,68,69,73,74],make_fram:[24,25,29,30,31,74],make_loop:42,make_textclip:74,makefram:24,manag:[21,28,29,31],mandatori:31,mani:[14,19,20,23,24,31,39,74],manipul:27,manual:[7,24,26],manual_track:7,mar:60,marcel:74,margin:[4,16,19,42],mark:[31,74],martin:74,mask:[3,5,8,9,14,15,19,20,23,29,30,31,39,40,50,51,53,56,60,61,62,63,64,65,69,70,72,73,74],mask_and:42,mask_clip:5,mask_color:42,mask_margin:60,mask_or:42,maskclip:24,match:4,matplotlib:[17,22,23],matter:[31,74],max:[10,15,16,29,30,31,63,74],maximum:[29,30,31,36,74],mayavi:[17,23],mayb:19,mean:[0,21,25,29,71],meant:[29,30,31,32,43,74],media:[17,23],medium:[31,74],megamet:14,meme:17,memoiz:[29,30,31,74],memori:[20,23,29,30,31,74],mencod:23,merg:41,mess:31,messag:[31,39,74],messi:42,meta:14,metadata:31,meteor:14,method:[14,21,24,25,27,29,30,31,37,39,40,53,74],middl:15,midi:17,mimic:14,min:[20,29,30,31,61,74],mind:[19,23,24,27],minimum:14,minut:[20,28],mirror_i:[19,42],mirror_x:[4,19,42],mirrorx:[29,30,31,74],misc:[39,41],mit:26,mix:[18,22,23,24],mkdir:28,modif:[20,24,43],modifi:[3,16,20,23,24,29,30,31,40,74],modifiedclip1:20,modifiedclip2:20,modifiedclip:20,modul:[20,21,26,27,32,39,43,74],moi_ukulel:16,moment:[20,42,74],momentarili:52,mono:[29,30,31,74],more:[1,3,14,15,17,18,19,20,21,23,24,27,31,39],most:[6,17,19,20,21,24,26,31],motif:3,motion:[31,71],motiv:[29,30,31,74],mountain:9,mountainmask2:9,mountainmask:9,mov:[3,9,15,16,31],move:[6,14,19,21,31,55,74],movelett:10,movement:10,movi:[5,6,7,14,15,17,23,24,31,41,74],moviepi:[1,3,4,5,6,7,9,10,11,12,14,15,16,19,24,25,26,27,29,30,31,32,40,41,42,74],movies:[14,16],moving_txt:14,moyen:4,mp3:[18,21,23,24,29,31,74],mp4:[3,4,5,7,11,12,16,17,19,20,21,23,25,29,30,31,35,36,74],mpeg4:[0,5,10,11,24,31,74],mpeg:31,mplfig_to_npimag:25,mpy:[21,24],much:[20,24,29,31],multicor:[31,74],multipli:[11,31,32,37,47,67,70,74],music:[4,6,35,74],must:[4,7,17,20,24,31,50,51,52,72,74],mute:33,my_anim:25,my_audio_clip:21,my_audioclip:18,my_audiofil:24,my_clip:[19,20,21,24],my_concaten:19,my_effect:39,my_imageclip:21,my_mask:24,my_mask_clip:24,my_new_clip:20,my_new_video:[31,74],my_pictur:[21,24],my_sound:21,my_stack:19,my_video:[12,21],my_video_clip:21,my_video_fil:24,myclip2:24,myclip:[24,29,30,31,43,67,74],myholidai:[23,31],myholidays_edit:23,myhous:31,mymaskvideo:31,myscript:2,mysoundarrai:29,myvideo2:19,myvideo3:19,myvideo:[19,20,24,29,30,31,35,36,74],name:[24,27,29,30,31,74],nameformat:[31,74],names_list:[31,74],nation:[3,4],nbyte:29,nchannel:29,ncolor:74,nearer:31,nearest:68,necessarili:[29,31,74],need:[4,7,18,19,20,21,22,24,27,74],neg:[20,29,30,31,44,74],neither:[20,27],neuquant:[31,74],never:[14,24],new_dur:44,new_t:[29,30,31,74],newaudio:32,newclip:[20,29,30,31,37,43,68,74],newcom:23,newsiz:[66,67],next:[0,4,7,10,24,74],nframe:[12,71],nice:[14,17,24,31,74],nletter:10,nloop:35,non:[17,19,26,31,53,62,74],none:[14,29,30,31,35,40,41,44,45,48,50,51,52,53,55,57,60,62,66,67,69,70,74],nor:[20,27],normal:[24,31,32,36,74],north:31,notabl:18,note:[3,7,18,19,20,21,24,29,31,32,74],notebook:[4,21,23,24,26,39],noth:[20,31],notimplementederror:21,now:[4,6,14,17,19,20,24,31,74],nul:62,number:[12,17,24,29,31,48,50,51,57,74],numer:[23,24,29],numerot:[31,74],numpi:[10,14,23,24,25,27,
29,30,31,74],numpy_arrai:24,object:[23,24,30,39],obtain:[11,16,19,24],occur:[44,74],off:[28,31,74],offscreen:55,offset:74,ogg:[24,31,35,74],ogv:[31,74],old:6,older:27,on_color:[14,16,31,74],onc:[7,9,21,24,27,31,73],one:[0,2,4,8,12,14,15,16,17,19,20,21,23,24,27,29,31,41,53,67,68,70,74],ones:[31,74],onli:[0,8,9,14,17,19,20,21,23,24,27,29,31,37,43,46,48,53,68,74],onlin:[14,17,21],opac:[4,16,31,60,74],opaqu:[15,16,31,74],opeing:28,open:[0,6,7,17,26,27,28,31,74],opencv:[23,26,27,55],oper:[23,26],opt:[31,74],optim:[24,31,74],optimizeplu:[31,74],optimizetranspar:[31,74],option1:[29,31,74],option2:[29,31,74],option:[0,19,21,23,24,28,29,30,31,74],optionn:19,order:[20,21,24,74],org:31,organ:42,origin:[4,11,20,21,26,29,30,31,48,62,74],oscil:20,other:[11,17,19,20,23,24,28,31,43,53,74],other_clip:[61,63],other_paramet:39,otherwis:[4,20,21,24,29,31],out:[14,21,31,34,41,74],outplac:[29,30,31,40,74],output:[24,25,29,31,41,74],output_shap:14,outside_region:53,over:[8,11,19,23,24,29,30,31,33,34,35,50,51,74],overlai:[11,23,53],overlaid:[8,31,74],own:[23,59],packag:[14,27,28,39],padding_end:52,page:[17,22],paint:[6,42],painting_fad:11,painting_txt:11,paramet:[24,29,30,31,45,53,57,60,67,68,74],parametr:62,park:3,part:[4,5,8,11,14,17,19,23,24,31,48,55,74],partial:6,particular:[3,42,74],particularli:21,pass:[29,31,74],past:16,path:[4,27,39],pcm_s16le:[29,31,74],pcm_s32le:[29,31,74],peopl:17,per:[9,12,24,29,30,31,74],perfect:[23,31,74],perform:[17,29],period:4,perspect:14,philosophi:16,phone:5,photo:[11,66],physic:[17,21],piano:16,pic:14,pickl:7,pictur:[6,8,14,19,20,24,29,30,31,41,45,53,61,63,74],pil:[23,27],pillow:27,pip:27,pixel:[9,12,19,20,21,24,29,30,31,48,53,56,60,62,67,74],place:[6,14,20,21,31,74],placebo:[31,74],plai:[0,19,20,21,24,29,30,31,35,41,57,70,72,73,74],playabl:17,playing_clip:31,pleas:[0,24,27,29,31,74],plot:25,plt:25,plugin:10,png:[3,9,12,21,24,31,74],point:[11,17,29,66,74],popular:31,portabl:23,portion:29,pos:[14,16,31,74],posit:[7,20,21,31,44,55,74],possibl:[17,20,24,31,74],post:[17,24],potenti:[29,30,31,74],pov:17,power:17,practic:[20,21,24],practiv:73,precis:17,prefer:[20,74],preprocess_arg:40,present:[1,21,22,24],preserve_luminos:45,preset:[29,30,31,74],press:[17,21],pretti:24,preview:[4,5,20,23,24,27,29,39],previou:[11,14],previous:7,princ:14,princess:14,print:[4,21,24,28,29,30,31,39,74],print_cmd:31,priori:24,problem:[20,21,23,29],process:[17,20,21,23,26,27,29,30,31,74],processor:24,produc:[25,29,30,31,74],proglog:[29,31,74],program:[20,21,24,27,29,31,39,74],progress:[15,31,33,34,50,51,59,62,74],progress_bar:[31,74],project:39,projectivetransform:14,prompt:2,properti:27,provid:[19,20,21,24,27,29,30,31,39,40,52,53,74],purpl:56,push:26,put:[5,7,17,19,21,23,24,29],pwd:2,pygam:[21,23,27,29,39],pyod:17,pypi:27,pyplot:25,python:[2,17,21,23,26,27,28,74],qu7hjrseyfg:4,qualiti:[23,24,29,31,74],quantiz:29,quick:[6,21,22],quit:[3,6],r_blur:55,r_zone:55,rad:68,radial:74,radian:68,radiu:[12,15,24,55,74],rai:17,rais:[7,21,40],ram:[21,24,31,74],rare:31,rate:[21,24,29,31,74],ratio:[3,20,31,67],raw:[14,16],rawvideo:[31,74],rbg:45,reach:74,read:[20,23,24,26,27,29,30,31,32,39,42,74],readabl:0,reader:[0,29,31],readi:1,readm:39,real:[0,16,21,31,74],realli:[23,27,29,30,31,74],reason:[0,20,23],recent:[0,27],recip:[6,11],recogn:20,reconstitut:6,rectangl:[8,48],rectangular:48,red:[29,30,31,74],reddit:[0,26],redo:28,reduc:[21,23],refer:[24,26],region:[3,14,20,31,48,53,74],regroup:[32,43,74],regular:[11,16,24,74],reight:47,rel:[19,31,39,74],relat:24,relative_po:31,releas:[21,26,28,29,30,31,74],relev:20,remain:53,rem
ov:[17,28,31,48,74],remove_temp:[31,74],renam:4,render:[0,3,11,14,17,20,21,23,24],replac:[6,16,29,30,31,56,71,74],replai:21,repo:27,report:[0,26],repositori:0,repres:[15,20,24,29,30,31,67,68,74],requir:[4,11,14,20,21,24,27,31,39,55,66],requires_dur:40,res:31,resampl:68,reset:[29,30,31,74],resiz:[0,3,14,16,19,20,21,23,24,27,29,30,31,41,42,43,68,74],resize_algorithm:31,resizecent:14,resolut:[14,67],resourc:[21,29,30,31,74],respect:[19,39,74],rest:[17,53],restart:21,result:[4,7,14,17,20,23,29,30,31,41,61,63,66,71,74],review:39,rewrite_audio:[31,74],rgb:[24,29,30,31,40,45,74],right:[5,16,21,48,60],rincomcam:17,rincon:17,roadmap:0,roman:4,romo_0001:3,romo_0004:3,root:39,rotat:42,rotmatrix:10,rule:21,run:[7,21,23,26,28,40],sai:[11,23,29,30,31,74],said:20,sam:17,same:[1,5,11,16,18,19,20,21,23,24,31,39,40,68,70,72,74],sampl:29,satur:[11,66],save:[7,9,21,31,41,74],save_fram:[9,12,24,31,74],scale:67,scaler:31,scene:[5,17,23,31,74],schmuzig:4,schoolbook:[4,7],scienc:[29,30,31,74],scikit:[11,17,23,27,66],scipi:[23,27,66],screen:[4,5,8,14,16,19,23,53,74],screenpo:[3,10],screensiz:[10,15,74],script:[3,7,10,14,22,23,26,31,39],scroll:[9,14,20,29,30,31,39,42,43,74],scrolling_credit:9,search:[24,31],sec:[20,24,29,30,31,74],second:[4,9,11,12,19,20,21,23,24,25,27,29,30,31,33,34,35,40,46,50,51,52,59,74],second_read:29,secretari:17,section:[0,1,18,21,23,24],see:[0,1,9,15,18,19,20,21,23,24,27,29,30,31,39,50,51,74],seem:28,seen:[11,14,24],segment:[3,4,10,27,42],select:23,self:[29,30,31,57,72,74],semi:[16,31,74],sentenc:17,separ:[10,39],sequenc:[31,74],seri:[23,24],seriou:4,serv:[31,74],server:[23,27],session:21,set:[19,24,27,29,30,31,45,52,57,60,72,74],set_audio:[4,7,18,20,31,35,74],set_dur:[4,7,11,14,15,20,23,24,29,30,31,39,74],set_end:[29,30,31,74],set_fp:[29,30,31,74],set_ismask:[29,30,31,74],set_make_fram:[29,30,31,74],set_mask:[3,5,9,14,20,21,24,31,74],set_memo:[29,30,31,74],set_opac:[31,74],set_po:[3,5,9,10,11,14,15,16,23,31,74],set_posit:[4,19,31,74],set_start:[19,20,29,30,31,74],set_ylim:25,setup:[27,39],setuptool:27,sever:[11,19,20,27,29,31,74],shadow:6,shallow:[29,30,31,74],shape:[14,23,74],sharp:74,shop:14,shortcut:21,should:[14,24,27,29,30,31,39,48,57,66,74],shouldn:21,show:[5,14,17,18,24,31],shown:24,side:19,sigma:12,signatur:[29,30,31,74],silli:4,similar:18,simpl:[6,19,23,28,39],simplecv:23,simpler:20,simpli:[31,39,74],simul:17,sin:[10,20,25,29],sinc:[10,25,31],sine:29,singl:[19,32,43],site:17,situat:74,size:[3,4,5,7,10,14,15,16,19,21,24,29,31,41,60,74],skimag:[12,14],skip:[29,30,31,74],slight:14,slightli:74,slow:[4,9,21,23,31,67,74],slower:[25,31,74],small:[5,24],smaller:[15,19,31,74],snapshot:[21,24],snd:29,sobel:11,soccer:17,social:17,softwar:[17,23,26,27,31,74],solar:14,soldier:17,solut:23,solv:[0,21],some:[0,3,4,14,17,19,20,21,23,24,27,28,30,31,32,39,50,51,66,74],some_anim:24,some_audiofil:18,some_fil:20,some_fold:[31,74],some_pictur:24,some_video:[18,24],some_video_clip:24,somearrai:31,someon:6,someth:[4,17,20,23],sometim:[19,24,31],somewher:21,somme_arrai:24,sonata:16,song:[21,29],soon:[6,16,44],sort:[17,24,39],sound:[0,6,14,16,20,21,27,29,30,31,33,34,41,74],soundfil:29,soundtrack:[18,19,24,31,74],sourc:[17,19,23,26,27,28,29,30,31,33,34,35,36,37,39,40,41,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74],south:31,space:[14,31,71],speak:39,special:[20,23,24,45],specif:[24,27],specifi:[3,19,21,24,29,30,31,40,52,57,62,74],speed:[20,21,31,44,70,74],speedup:44,speedx:[4,9,20,31,42,74],spend:[31,74],split:[5,74],src:14,srt:74,stabil:23,standard:[24,39],star
:6,stars_darken:14,start:[2,6,17,21,26,29,30,31,43,48,74],starworm:14,statement:21,step:[0,9,11,14,28],stereo:[29,30,31,74],stiff:62,still:[0,17,21,31,74],stop:30,store:[29,39],stori:74,strang:28,stream:[23,31],stretch:74,strictli:27,string:[20,29,30,31,74],stroke:[24,31,74],stroke_color:[31,74],stroke_width:[24,31,74],studi:4,studio:0,stuff:23,sub:74,subappli:[31,74],subclip:[3,4,5,7,9,10,11,12,14,15,16,19,20,23,24,29,30,31,39,74],subfold:39,subfx:[31,74],submodul:[1,21,39],subpackag:21,subplot:25,subprocess:[21,39],subregion:48,subtitl:[17,23,39,42],subtitlesclip:74,success:23,sudo:[27,28],sum:[12,45],summar:24,supercut:17,superfast:[31,74],superimpos:14,supersampl:42,supervisor:74,support:[24,29,31,74],suppos:20,sure:23,surfac:[14,24],surfer:17,swell:67,symetr:74,symmetr:4,syntax:20,system:4,t_end:[20,29,30,31,74],t_func:[29,30,31,74],t_start:[20,29,30,31,74],take:[3,8,11,12,17,20,21,25,27,28,31],taken:71,talk:5,target_resolut:31,targetnam:41,task:[23,39],tast:3,tbr:31,technic:[1,24],tediou:23,tell:[17,21,40,66],temp_audiofil:[31,74],tempfil:[31,74],tempfilenam:31,tempo:4,temporari:[31,74],temporarili:[29,31,74],temptxt:31,ten:23,term:[29,31,74],termin:[27,28],test:[2,24],test_issu:2,text:[6,7,9,11,14,16,17,23,24,27,31,74],textclip:[4,7,10,11,14,15,16,23,42,74],tform:14,tfreez:11,than:[19,23,24,29,30,31,74],the_end:15,theclip:[29,30,31,74],theend:15,thei:[7,14,18,19,20,21,23,24,29,30,31,32,39,73,74],them:[4,19,21,23,24,31,52,68,74],thi:[0,1,3,4,5,7,9,10,14,15,16,17,18,19,20,21,23,24,25,27,28,29,30,31,32,37,39,40,43,48,59,60,62,71,73,74],thing:21,think:[10,14],third:16,those:21,though:74,thr:62,thread:[31,74],three:[7,11,20,21,53],threshold:62,through:[0,17,21,23,24,28,29,30,31,74],thumb:21,tiff:[24,31],time:[0,3,5,7,11,14,17,21,23,24,25,29,30,31,35,39,41,42,52,53,55,57,67,71,74],time_func:31,time_mirror:[20,42],time_symmetr:[12,42],timelin:[20,29,30,31,74],tini:[31,74],tip:21,titl:[4,6,17,23,26],tmin:4,to_audiofil:29,to_fxfi:7,to_gif:[31,74],to_imageclip:[4,11,24,31,74],to_images_sequ:[31,74],to_mask:[24,31,74],to_rgb:[14,24,31,74],to_soundarrai:29,to_videofil:[31,74],togeth:[5,17,19,23,24,29,31],toggl:[31,74],too:[7,21,50,51,64,65],tool:[3,4,5,7,9,10,14,15,23,26,39,42],top:[4,19,29,30,31,48,60,74],total:[12,16,23,52,57,74],total_dur:52,total_imag:12,toward:14,tqdm:27,tracer:17,track:[6,17,18,19,23,27,39,42],tranform:31,transcrib:17,transform:[14,17,21,22,23,24,29,30,31,43,66,74],transit:[17,19],transmit:24,transpar:[15,16,24,31,60,62,68,74],trapezoid:14,trapzwarp:14,treat:24,treatment:[29,30,31,74],trespass:29,trial:21,trick:21,triplet:20,troubl:27,troubleshoot:26,trust:0,tti:[29,30,31,74],tunabl:[31,74],tune:[4,23],tupl:53,turn:[20,23,24,39],tutori:[9,14,17],tweet:[17,26],twice:[29,30,31,74],two:[4,5,7,19,20,21,24,29,30,31,32,39,43,61,63,74],txt:[7,9,11,14,16,31,74],txt_clip:23,txt_col:16,txt_color:14,txt_credit:4,txt_mov:16,txt_speed:14,txt_titl:4,txtclip:[10,14],txy:7,type:[21,24,27,28,31,32,43,74],typic:23,ubuntu:14,ugli:7,uint8:[14,29,30,31,74],ukulel:16,ultracomposit:3,ultrafast:[31,74],unanim:24,under:[4,17,26],underground:14,understand:[16,23,24],undocu:17,unfil:31,unit:68,unless:[19,24,31,74],unmask:31,unmodifi:40,unsaf:21,unstabl:28,until:74,unzip:[27,28],updat:[29,30,31,74],upload:17,usabl:29,use:[3,17,18,20,23,24,27,29,30,31,32,37,39,40,43,74],use_bgclip:31,use_clip_fps_by_default:40,used:[0,11,12,17,20,23,24,26,27,28,29,30,31,32,37,39,40,43,45,47,50,51,74],useful:[21,23,31,39,74],useless:27,user:[4,23,24,27,31],uses:[0,14,17,21,23,24],using:[5,11,14,20,21,23,25,27,29,30,31,
74],usual:25,utf:4,valid:[21,31],valu:[29,30,31,37,45,56,60,68,74],value1:[29,31,74],value2:[29,31,74],vanish:[14,15],vapori:17,vari:[24,74],variabl:[14,27,31,40],variat:29,varnam:40,vcodec:41,vector:[24,29,30,31,74],ventur:14,verbos:[29,31,74],veri:[0,4,18,19,20,21,23,28,31,39,73,74],version:[0,12,20,23,27,28,53,67],vertic:[19,65,69,74],veryfast:[31,74],veryslow:[31,74],vfx:[4,7,9,11,12,19,20,23,32,42],video:[1,3,4,5,6,7,9,10,11,14,15,18,19,20,21,22,23,25,26,27,29,30,32,33,36,37,39,40,41,42],video_period:4,videoclip2:18,videoclip:[18,21,23,25,29,30,35,36,39,40,42,43,74],videofil:[31,74],videofileclip:[3,4,5,7,9,11,12,15,16,18,19,20,21,23,29,30,35,36,39,42,74],view:17,visibl:[8,19,24,31,53],vision:[29,30,31,74],vispi:17,vizual:17,vizualis:21,vlc:0,volum:[23,32,36,37],volumex:[5,19,23,29,30,31,32,42,74],vortex:10,vortexout:10,vox:4,wai:[10,18,19,20,23,24,29,30,31,39,59,74],want:[16,18,19,20,21,23,24,26,27,31,42,66,74],war:6,warn:[14,28],warp:14,warped_txt:14,watch:[4,29],wav:[21,24,29,31,74],wave:29,web:[23,31,74],webapp:0,webcam:23,webm:[23,31,74],webserv:21,websit:0,week:17,weight:45,well:[4,10,21,24,31,73,74],were:0,west:[14,31],western:11,what:[4,11,17,19,21,28,29],when:[11,14,17,18,19,20,23,24,29,30,31,32,37,39,43,62,74],whenev:20,where:[2,19,20,24,26,29,30,31,34,50,51,55,62,74],wheter:[29,30,31,74],whether:[29,31,74],which:[0,4,11,14,15,17,19,20,23,24,26,27,29,30,31,32,39,40,48,53,62,66,74],white:[4,10,11,14,15,16,17,20,23,24,39,45,56,74],who:[14,16],whole:[8,20,29,68,74],whose:[29,30,31,36,74],wht:74,why:23,wide:48,width:[19,20,21,23,24,25,29,31,43,48,60,66,67,74],wihtout:29,wind:14,window:[2,21,24,26,27,28],wish:31,with_mask:[31,74],with_tbb:28,with_tim:[29,30,31,74],with_v4l:28,withmask:[24,31,74],without:[3,21,27,29],without_audio:[31,74],won:[24,29,31,74],word:[17,24,47,53],work:[0,14,21,22,24,26,27,28,31,46,48,74],workflow:25,world:17,worldwid:21,worri:[18,19,20,27],would:[14,20,29,31,74],wrap:[24,31],write:[4,5,7,10,14,20,21,23,24,26,27,29,30,31,37,39,40,41,43,74],write_audiofil:29,write_gif:[24,25,31,74],write_images_sequ:[31,74],write_logfil:[29,31,74],write_videofil:[3,4,5,7,9,10,11,12,14,15,16,19,23,24,25,29,30,31,74],written:[15,17,26,29,43],www:4,x_center:48,x_speed:69,x_start:69,xolonium:14,xve:4,y_center:48,y_speed:69,y_start:69,year:[14,17],yet:[1,23],yield:60,you:[0,1,2,3,4,6,7,11,14,15,17,18,19,20,21,22,23,24,25,26,27,29,31,32,37,39,42,43,52,53,62,68,74],your:[0,18,19,21,23,24,27,28,31,74],yourself:[19,20],youtub:4,zero:[19,44,74],zip:[3,17],zoom:39,zulko:[4,26],zulkoninov:16,zvcvoc2vwdc:4},titles:["FAQ and troubleshooting","Advanced tools","Moviepy Docker","Placing clips according to a picture","A reconstitution of 15th century dancing","An example with sound","Example Scripts","Tracking and blurring someone\u2019s face","MoviePy logo with a moving shadow","Partially Hidden credits","Text with moving letters","Freezing a movie frame with a painting effect","Quick recipes","Character duplication in a video","A Star-Wars like opening title","\u201cThe End\u201d effect","A simple music video","Gallery","Audio in MoviePy","Mixing clips","Clips transformations and effects","How to be efficient with MoviePy","Getting started with MoviePy","Quick presentation","Creating and exporting video clips","Working with matplotlib","User Guide","Download and Installation","So you want to install OpenCV 2.4.6 ?","AudioClip","Clip","Classes of Video 
Clips","audio.fx","moviepy.audio.fx.all.audio_fadein","moviepy.audio.fx.all.audio_fadeout","moviepy.audio.fx.all.audio_loop","moviepy.audio.fx.all.audio_normalize","moviepy.audio.fx.all.volumex","audio.tools","Organization of MoviePy\u2019s code","Decorators","FFMPEG tools","Reference Manual","moviepy.video.fx (vfx)","moviepy.video.fx.all.accel_decel","moviepy.video.fx.all.blackwhite","moviepy.video.fx.all.blink","moviepy.video.fx.all.colorx","moviepy.video.fx.all.crop","moviepy.video.fx.all.even_size","moviepy.video.fx.all.fadein","moviepy.video.fx.all.fadeout","moviepy.video.fx.all.freeze","moviepy.video.fx.all.freeze_region","moviepy.video.fx.all.gamma_corr","moviepy.video.fx.all.headblur","moviepy.video.fx.all.invert_colors","moviepy.video.fx.all.loop","moviepy.video.fx.all.lum_contrast","moviepy.video.fx.all.make_loopable","moviepy.video.fx.all.margin","moviepy.video.fx.all.mask_and","moviepy.video.fx.all.mask_color","moviepy.video.fx.all.mask_or","moviepy.video.fx.all.mirror_x","moviepy.video.fx.all.mirror_y","moviepy.video.fx.all.painting","moviepy.video.fx.all.resize","moviepy.video.fx.all.rotate","moviepy.video.fx.all.scroll","moviepy.video.fx.all.speedx","moviepy.video.fx.all.supersample","moviepy.video.fx.all.time_mirror","moviepy.video.fx.all.time_symmetrize","video.tools"],titleterms:{"15th":4,"class":31,"export":[18,24],"new":18,The:[15,17,21],accel_decel:44,accord:3,add:12,advanc:1,advantag:23,all:[12,33,34,35,36,37,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73],ani:0,anim:[17,24,25],art:12,attribut:20,audio:[18,19,32,33,34,35,36,37,38],audio_fadein:33,audio_fadeout:34,audio_loop:35,audio_norm:36,audioclip:[18,29],audiofileclip:29,averag:12,basic:23,befor:12,blackwhit:45,blink:46,blur:[7,12],bug:0,can:0,cannot:0,categori:24,centuri:4,chang:20,charact:13,clip:[3,18,19,20,21,24,30,31],close:21,code:[23,39],colorclip:31,colorx:47,common:0,composit:[12,18,19],compositeaudioclip:29,compositevideoclip:[19,31],concaten:19,concept:23,contribut:26,cover:17,creat:[18,20,24],credit:[1,9,74],crop:48,cup:17,custom:[20,25],cut:[1,12],danc:4,data:17,decor:40,defin:25,depend:27,detect:12,docker:2,download:27,draw:74,duplic:13,edit:17,editor:21,effect:[11,12,15,20],effici:21,end:15,error:0,even_s:49,exampl:[5,6,23,25],face:7,fadein:50,fadeout:51,faq:0,favorit:0,ffmpeg:41,file:24,filter:12,frame:[11,12],freez:[11,52],freeze_region:53,from:[2,17],fun:12,galleri:17,gamma_corr:54,gener:0,get:[12,22],gif:[12,17,24],git:2,guid:26,headblur:55,hidden:9,how:[21,23],imag:24,imageclip:[24,31],imagesequenceclip:24,instal:[27,28],invert_color:56,ipython_displai:21,jupyt:25,kapw:17,letter:10,like:14,limit:23,logo:8,loop:57,lum_contrast:58,made:[17,18],make:[0,12],make_loop:59,mani:21,manual:42,margin:60,mask:24,mask_and:61,mask_color:62,mask_or:63,matplotlib:25,method:20,mirror_i:65,mirror_x:64,misc:17,mix:[17,19],move:[8,10],movi:11,moviepi:[0,2,8,17,18,20,21,22,23,33,34,35,36,37,39,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73],mp4:24,music:[16,17],need:23,notebook:25,ogv:24,old:17,open:14,opencv:28,option:27,organ:39,other:27,own:2,paint:[11,66],partial:9,piano:17,pictur:3,place:3,player:0,posit:19,prequisit:2,present:23,preview:[0,18,21],program:17,project:17,quick:[12,23],read:0,recip:12,reconstitut:4,reel:17,refer:42,repo:2,represent:20,resiz:67,rinconcam:17,roll:17,rotat:68,run:2,save_fram:21,scene:12,scientif:17,script:[2,6,17],scroll:69,seem:0,segment:74,shadow:8,sheet:17,should:21,show:21,simpl:[16,25],slow
er:0,someon:7,song:17,sound:5,speedx:70,stack:19,star:14,start:[19,22],step:2,stop:19,subtitl:[1,74],supersampl:71,technolog:17,text:10,textclip:[24,31],than:0,thei:0,them:0,time:[19,20],time_mirror:72,time_symmetr:73,titl:[12,14],tool:[1,38,41,74],track:[1,7,74],transcript:17,transform:20,troubleshoot:0,unittest:2,use:21,useful:27,useless:12,user:26,using:17,vector:17,vfx:43,video:[0,12,13,16,17,24,31,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74],videoclip:[24,31],videofileclip:[24,31],videogrep:17,volumex:37,wai:21,want:28,war:14,webm:24,what:18,when:21,work:[23,25],you:28,your:2}}) \ No newline at end of file