diff --git a/.gitignore b/.gitignore index 4ab62778..4d2fc96a 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ docs/_build .mypy_cache .python-version .pytest_cache +.idea diff --git a/CHANGES/203.feature b/CHANGES/203.feature new file mode 100644 index 00000000..6567221b --- /dev/null +++ b/CHANGES/203.feature @@ -0,0 +1 @@ +Add new low-level API and high-level model classes for Images and Containers \ No newline at end of file diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt index 15c6b572..3a86402e 100644 --- a/CONTRIBUTORS.txt +++ b/CONTRIBUTORS.txt @@ -1,3 +1,4 @@ +Andreas Krebs Andrew Svetlov Byeongjun Park Cecil Tonglet diff --git a/aiodocker/__init__.py b/aiodocker/__init__.py index 1de30c93..8bb46e52 100644 --- a/aiodocker/__init__.py +++ b/aiodocker/__init__.py @@ -1,7 +1,11 @@ from .docker import Docker +from .api.client import APIClient +from .client import DockerClient __version__ = '0.11.0a0' -__all__ = ("Docker", ) +__all__ = ("Docker", + "APIClient", + "DockerClient") diff --git a/aiodocker/api/__init__.py b/aiodocker/api/__init__.py new file mode 100644 index 00000000..7260e953 --- /dev/null +++ b/aiodocker/api/__init__.py @@ -0,0 +1 @@ +from .client import APIClient diff --git a/aiodocker/api/client.py b/aiodocker/api/client.py new file mode 100644 index 00000000..072b302d --- /dev/null +++ b/aiodocker/api/client.py @@ -0,0 +1,226 @@ +import asyncio +import json +import logging +import os +from pathlib import Path +import re +import ssl + +import aiohttp +from yarl import URL + +from ..utils import utils + +# Sub-API classes +from .container import DockerContainerAPI +from ..events import DockerEvents +from ..errors import create_api_error_from_response +from .image import DockerImageAPI +from .system import DockerSystemAPI +# from .logs import DockerLog +# from .swarm import DockerSwarm +# from .services import DockerServices +# from .tasks import DockerTasks +# from .volumes import DockerVolumes, DockerVolume +# from .nodes import DockerSwarmNodes +# from .system import DockerSystem + +# __all__ = ( +# 'Docker', +# 'DockerContainers', 'DockerContainer', +# 'DockerEvents', +# 'DockerError', +# 'DockerImages', +# 'DockerLog', +# 'DockerSwarm', +# 'DockerServices', +# 'DockerTasks', +# 'DockerVolumes', 'DockerVolume', +# 'DockerSwarmNodes', +# 'DockerSystem' +# ) + +log = logging.getLogger(__name__) + +_sock_search_paths = [ + Path('/run/docker.sock'), + Path('/var/run/docker.sock'), +] + +_rx_version = re.compile(r'^v\d+\.\d+$') +_rx_tcp_schemes = re.compile(r'^(tcp|http)://') + + +class APIClient: + def __init__(self, + url=None, + connector=None, + session=None, + ssl_context=None, + api_version='v1.30'): + + docker_host = url # rename + if docker_host is None: + docker_host = os.environ.get('DOCKER_HOST', None) + if docker_host is None: + for sockpath in _sock_search_paths: + if sockpath.is_socket(): + docker_host = 'unix://' + str(sockpath) + break + self.docker_host = docker_host + + assert _rx_version.search(api_version) is not None, \ + 'Invalid API version format' + self.api_version = api_version + + if docker_host is None: + raise ValueError( + "Missing valid docker_host." + "Either DOCKER_HOST or local sockets are not available." 
+ ) + + if connector is None: + if _rx_tcp_schemes.search(docker_host): + if os.environ.get('DOCKER_TLS_VERIFY', '0') == '1': + ssl_context = self._docker_machine_ssl_context() + docker_host = _rx_tcp_schemes.sub('https://', docker_host) + else: + ssl_context = None + connector = aiohttp.TCPConnector(ssl_context=ssl_context) + self.docker_host = docker_host + elif docker_host.startswith('unix://'): + connector = aiohttp.UnixConnector(docker_host[7:]) + # dummy hostname for URL composition + self.docker_host = "unix://localhost" + else: + raise ValueError('Missing protocol scheme in docker_host.') + self.connector = connector + if session is None: + session = aiohttp.ClientSession(connector=self.connector) + self.session = session + + self.events = DockerEvents(self) + self._container = DockerContainerAPI(self) + self._image = DockerImageAPI(self) + # self.swarm = DockerSwarm(self) + # self.services = DockerServices(self) + # self.tasks = DockerTasks(self) + # self.volumes = DockerVolumes(self) + # self.nodes = DockerSwarmNodes(self) + self._system = DockerSystemAPI(self) + + async def close(self): + await self.events.stop() + await self.session.close() + + async def auth(self, **credentials): + response = await self._query_json( + "auth", "POST", + data=credentials, + ) + return response + + async def version(self): + data = await self._query_json("version") + return data + + def _canonicalize_url(self, path): + return URL("{self.docker_host}/{self.api_version}/{path}" + .format(self=self, path=path)) + + async def _query(self, path, method='GET', *, + params=None, data=None, headers=None, + timeout=None): + ''' + Get the response object by performing the HTTP request. + The caller is responsible to finalize the response object. + ''' + url = self._canonicalize_url(path) + if headers and 'content-type' not in headers: + headers['content-type'] = 'application/json' + try: + response = await self.session.request( + method, url, + params=utils.httpize(params), + headers=headers, + data=data, + timeout=timeout) + except asyncio.TimeoutError: + raise + if (response.status // 100) in [4, 5]: + await create_api_error_from_response(response) + return response + + async def _query_json(self, path, method='GET', *, + params=None, data=None, headers=None, + timeout=None): + """ + A shorthand of _query() that treats the input as JSON. + """ + if headers is None: + headers = {} + headers['content-type'] = 'application/json' + if not isinstance(data, (str, bytes)): + data = json.dumps(data) + response = await self._query( + path, method, + params=params, data=data, headers=headers, + timeout=timeout) + data = await utils.parse_result(response) + return data + + async def _websocket(self, path, **params): + if not params: + params = { + 'stdin': True, + 'stdout': True, + 'stderr': True, + 'stream': True + } + url = self._canonicalize_url(path) + # ws_connect() does not have params arg. + url = url.with_query(utils.httpize(params)) + ws = await self.session.ws_connect( + url, + protocols=['chat'], + origin='http://localhost', + autoping=True, + autoclose=True) + return ws + + @staticmethod + def _docker_machine_ssl_context(): + """ + Create a SSLContext object using DOCKER_* env vars. 
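+
+        Expects ``DOCKER_CERT_PATH`` to point at a directory containing
+        ``ca.pem``, ``cert.pem`` and ``key.pem``; a :py:class:`ValueError`
+        is raised if the variable is not set.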
+ """ + context = ssl.SSLContext(ssl.PROTOCOL_TLS) + context.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS) + certs_path = os.environ.get('DOCKER_CERT_PATH', None) + if certs_path is None: + raise ValueError("Cannot create ssl context, " + "DOCKER_CERT_PATH is not set!") + certs_path = Path(certs_path) + context.load_verify_locations(cafile=certs_path / 'ca.pem') + context.load_cert_chain(certfile=certs_path / 'cert.pem', + keyfile=certs_path / 'key.pem') + return context + + @property + def container(self): + """ + An object for managing containers on the server. See the + :ref:`low-level containers documentation ` for full details. + """ + return self._container + + @property + def image(self): + """ + An object for managing images on the server. See the + :ref:`low-level images documentation ` for full details. + """ + return self._image + + @property + def system(self): + return self._system diff --git a/aiodocker/api/container.py b/aiodocker/api/container.py new file mode 100644 index 00000000..26cdd97d --- /dev/null +++ b/aiodocker/api/container.py @@ -0,0 +1,628 @@ +import json +import tarfile +from typing import Any, List, Mapping + +from ..exceptions import DockerError, DockerContainerError +from ..jsonstream import json_stream_result +from ..multiplexed import multiplexed_result +from ..utils import identical, parse_result, clean_filters +from ..types import ContainerConfig, HostConfig, NetworkingConfig, EndpointConfig + +from ..logs import DockerLog + + +class DockerContainerAPI(object): + def __init__(self, api_client): + self.api_client = api_client + + async def list(self, all=False, limit=-1, size=False, filters: Mapping = None) -> List[Mapping]: + """ + List containers. Similar to the ``docker ps`` command. + + Args: + all (bool): Show all containers. Only running containers are shown + by default + limit (int): Show `limit` last created containers, include + non-running ones + size (bool): Display sizes + filters (dict): Filters to be processed on the image list. + Available filters: + + - `exited` (int): Only containers with specified exit code + - `status` (str): One of ``restarting``, ``running``, + ``paused``, ``exited`` + - `label` (str): format either ``"key"`` or ``"key=value"`` + - `id` (str): The id of the container. + - `name` (str): The name of the container. + - `ancestor` (str): Filter by container ancestor. Format of + ``[:tag]``, ````, or + ````. + - `before` (str): Only containers created before a particular + container. Give the container name or id. + - `since` (str): Only containers created after a particular + container. Give container name or id. + + A comprehensive list can be found in the documentation for + `docker ps + `_. + + Returns: + A list of dicts, one per container + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + params = { + 'limit': limit, + 'all': all, + 'size': size + } + if filters: + params['filters'] = clean_filters(filters) + response = await self.api_client._query_json( + "containers/json", + method='GET', + params=params + ) + return response + + async def create(self, image, command=None, hostname=None, user=None, + detach=False, stdin_open=False, tty=False, ports=None, + environment=None, volumes=None, + network_disabled=False, name=None, entrypoint=None, + working_dir=None, domainname=None, host_config=None, + mac_address=None, labels=None, stop_signal=None, + networking_config=None, healthcheck=None, + stop_timeout=None, runtime=None): + """ + Creates a container. 
Parameters are similar to those for the ``docker + run`` command except it doesn't support the attach options (``-a``). + + The arguments that are passed directly to this function are + host-independent configuration options. Host-specific configuration + is passed with the `host_config` argument. You'll normally want to + use this method in combination with the :py:meth:`create_host_config` + method to generate ``host_config``. + + **Port bindings** + + Port binding is done in two parts: first, provide a list of ports to + open inside the container with the ``ports`` parameter, then declare + bindings with the ``host_config`` parameter. For example: + + .. code-block:: python + + container_id = client.container.create( + 'busybox', 'ls', ports=[1111, 2222], + host_config=client.container.create_host_config(port_bindings={ + 1111: 4567, + 2222: None + }) + ) + + + You can limit the host address on which the port will be exposed like + such: + + .. code-block:: python + + client.container.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)}) + + Or without host port assignment: + + .. code-block:: python + + client.container.create_host_config(port_bindings={1111: ('127.0.0.1',)}) + + If you wish to use UDP instead of TCP (default), you need to declare + ports as such in both the config and host config: + + .. code-block:: python + + container_id = client.container.create( + 'busybox', 'ls', ports=[(1111, 'udp'), 2222], + host_config = client.container.create_host_config(port_bindings={ + '1111/udp': 4567, 2222: None + }) + ) + + To bind multiple host ports to a single container port, use the + following syntax: + + .. code-block:: python + + client.container.create_host_config(port_bindings={ + 1111: [1234, 4567] + }) + + You can also bind multiple IPs to a single container port: + + .. code-block:: python + + client.container.create_host_config(port_bindings={ + 1111: [ + ('192.168.0.100', 1234), + ('192.168.0.101', 1234) + ] + }) + + **Using volumes** + + Volume declaration is done in two parts. Provide a list of + paths to use as mountpoints inside the container with the + ``volumes`` parameter, and declare mappings from paths on the host + in the ``host_config`` section. + + .. code-block:: python + + container_id = client.container.create( + 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], + host_config=cli.create_host_config(binds={ + '/home/user1/': { + 'bind': '/mnt/vol2', + 'mode': 'rw', + }, + '/var/www': { + 'bind': '/mnt/vol1', + 'mode': 'ro', + } + }) + ) + + You can alternatively specify binds as a list. This code is equivalent + to the example above: + + .. code-block:: python + + container_id = api.container.create( + 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], + host_config=cli.create_host_config(binds=[ + '/home/user1/:/mnt/vol2', + '/var/www:/mnt/vol1:ro', + ]) + ) + + **Networking** + + You can specify networks to connect the container to by using the + ``networking_config`` parameter. At the time of creation, you can + only connect a container to a single networking, but you + can create more connections by using + :py:meth:`~connect_container_to_network`. + + For example: + + .. 
code-block:: python + + networking_config = api.container.create_networking_config({ + 'network1': docker_client.create_endpoint_config( + ipv4_address='172.28.0.124', + aliases=['foo', 'bar'], + links=['container2'] + ) + }) + + ctnr = api.container.create( + img, command, networking_config=networking_config + ) + + Args: + image (str): The image to run + command (str or list): The command to be run in the container + hostname (str): Optional hostname for the container + user (str or int): Username or UID + detach (bool): Detached mode: run container in the background and + return container ID + stdin_open (bool): Keep STDIN open even if not attached + tty (bool): Allocate a pseudo-TTY + ports (list of ints): A list of port numbers + environment (dict or list): A dictionary or a list of strings in + the following format ``["PASSWORD=xxx"]`` or + ``{"PASSWORD": "xxx"}``. + volumes (str or list): List of paths inside the container to use + as volumes. + network_disabled (bool): Disable networking + name (str): A name for the container + entrypoint (str or list): An entrypoint + working_dir (str): Path to the working directory + domainname (str): The domain name to use for the container + host_config (dict): A dictionary created with + :py:meth:`create_host_config`. + mac_address (str): The Mac Address to assign the container + labels (dict or list): A dictionary of name-value labels (e.g. + ``{"label1": "value1", "label2": "value2"}``) or a list of + names of labels to set with empty values (e.g. + ``["label1", "label2"]``) + stop_signal (str): The stop signal to use to stop the container + (e.g. ``SIGINT``). + stop_timeout (int): Timeout to stop the container, in seconds. + Default: 10 + networking_config (dict): A networking configuration generated + by :py:meth:`create_networking_config`. + runtime (str): Runtime to use with this container. + healthcheck (dict): Specify a test to perform to check that the + container is healthy. + + Returns: + A dictionary with an image 'Id' key and a 'Warnings' key. + + Raises: + :py:class:`aiodocker.errors.ImageNotFound` + If the specified image does not exist. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + if isinstance(volumes, str): + volumes = [volumes, ] + + config = self.create_container_config( + image, command, hostname, user, detach, stdin_open, tty, + ports, environment, volumes, + network_disabled, entrypoint, working_dir, domainname, + host_config, mac_address, labels, + stop_signal, networking_config, healthcheck, + stop_timeout, runtime + ) + return await self.create_from_config(config, name) + + def create_container_config(self, *args, **kwargs): + return ContainerConfig(self.api_client.version, *args, **kwargs) + + async def create_from_config(self, config, name=None) -> Mapping[str, Any]: + url = "containers/create" + + config = json.dumps(config, sort_keys=True).encode('utf-8') + kwargs = {} + if name: + kwargs['name'] = name + response = await self.api_client._query_json( + url, + method='POST', + data=config, + params=kwargs + ) + return response + + def create_host_config(self, *args, **kwargs): + """ + Create a dictionary for the ``host_config`` argument to + :py:meth:`create`. + + Args: + auto_remove (bool): enable auto-removal of the container on daemon + side when the container's process exits. + binds (dict): Volumes to bind. See :py:meth:`create_container` + for more information. 
+ blkio_weight_device: Block IO weight (relative device weight) in + the form of: ``[{"Path": "device_path", "Weight": weight}]``. + blkio_weight: Block IO weight (relative weight), accepts a weight + value between 10 and 1000. + cap_add (list of str): Add kernel capabilities. For example, + ``["SYS_ADMIN", "MKNOD"]``. + cap_drop (list of str): Drop kernel capabilities. + cpu_period (int): The length of a CPU period in microseconds. + cpu_quota (int): Microseconds of CPU time that the container can + get in a CPU period. + cpu_shares (int): CPU shares (relative weight). + cpuset_cpus (str): CPUs in which to allow execution (``0-3``, + ``0,1``). + cpuset_mems (str): Memory nodes (MEMs) in which to allow execution + (``0-3``, ``0,1``). Only effective on NUMA systems. + device_cgroup_rules (:py:class:`list`): A list of cgroup rules to + apply to the container. + device_read_bps: Limit read rate (bytes per second) from a device + in the form of: `[{"Path": "device_path", "Rate": rate}]` + device_read_iops: Limit read rate (IO per second) from a device. + device_write_bps: Limit write rate (bytes per second) from a + device. + device_write_iops: Limit write rate (IO per second) from a device. + devices (:py:class:`list`): Expose host devices to the container, + as a list of strings in the form + ``::``. + + For example, ``/dev/sda:/dev/xvda:rwm`` allows the container + to have read-write access to the host's ``/dev/sda`` via a + node named ``/dev/xvda`` inside the container. + dns (:py:class:`list`): Set custom DNS servers. + dns_opt (:py:class:`list`): Additional options to be added to the + container's ``resolv.conf`` file + dns_search (:py:class:`list`): DNS search domains. + extra_hosts (dict): Addtional hostnames to resolve inside the + container, as a mapping of hostname to IP address. + group_add (:py:class:`list`): List of additional group names and/or + IDs that the container process will run as. + init (bool): Run an init inside the container that forwards + signals and reaps processes + init_path (str): Path to the docker-init binary + ipc_mode (str): Set the IPC mode for the container. + isolation (str): Isolation technology to use. Default: `None`. + links (dict or list of tuples): Either a dictionary mapping name + to alias or as a list of ``(name, alias)`` tuples. + log_config (dict): Logging configuration, as a dictionary with + keys: + + - ``type`` The logging driver name. + - ``config`` A dictionary of configuration for the logging + driver. + + lxc_conf (dict): LXC config. + mem_limit (float or str): Memory limit. Accepts float values + (which represent the memory limit of the created container in + bytes) or a string with a units identification char + (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is + specified without a units character, bytes are assumed as an + mem_swappiness (int): Tune a container's memory swappiness + behavior. Accepts number between 0 and 100. + memswap_limit (str or int): Maximum amount of memory + swap a + container is allowed to consume. + mounts (:py:class:`list`): Specification for mounts to be added to + the container. More powerful alternative to ``binds``. Each + item in the list is expected to be a + :py:class:`docker.types.Mount` object. + network_mode (str): One of: + + - ``bridge`` Create a new network stack for the container on + on the bridge network. + - ``none`` No networking for this container. + - ``container:`` Reuse another container's network + stack. + - ``host`` Use the host network stack. 
+ oom_kill_disable (bool): Whether to disable OOM killer. + oom_score_adj (int): An integer value containing the score given + to the container in order to tune OOM killer preferences. + pid_mode (str): If set to ``host``, use the host PID namespace + inside the container. + pids_limit (int): Tune a container's pids limit. Set ``-1`` for + unlimited. + port_bindings (dict): See :py:meth:`create_container` + for more information. + privileged (bool): Give extended privileges to this container. + publish_all_ports (bool): Publish all ports to the host. + read_only (bool): Mount the container's root filesystem as read + only. + restart_policy (dict): Restart the container when it exits. + Configured as a dictionary with keys: + + - ``Name`` One of ``on-failure``, or ``always``. + - ``MaximumRetryCount`` Number of times to restart the + container on failure. + security_opt (:py:class:`list`): A list of string values to + customize labels for MLS systems, such as SELinux. + shm_size (str or int): Size of /dev/shm (e.g. ``1G``). + storage_opt (dict): Storage driver options per container as a + key-value mapping. + sysctls (dict): Kernel parameters to set in the container. + tmpfs (dict): Temporary filesystems to mount, as a dictionary + mapping a path inside the container to options for that path. + + For example: + + .. code-block:: python + + { + '/mnt/vol2': '', + '/mnt/vol1': 'size=3G,uid=1000' + } + + ulimits (:py:class:`list`): Ulimits to set inside the container, + as a list of dicts. + userns_mode (str): Sets the user namespace mode for the container + when user namespace remapping option is enabled. Supported + values are: ``host`` + volumes_from (:py:class:`list`): List of container names or IDs to + get volumes from. + runtime (str): Runtime to use with this container. + + + Returns: + (dict) A dictionary which can be passed to the ``host_config`` + argument to :py:meth:`create`. + + Example: + + .. code-block:: python + + api.container.create_host_config(privileged=True, cap_drop=['MKNOD'], + volumes_from=['nostalgic_newton']) + {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True, + 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False} + + """ + if not kwargs: + kwargs = {} + if 'version' in kwargs: + raise TypeError( + "create_host_config() got an unexpected " + "keyword argument 'version'" + ) + kwargs['version'] = self.api_client.version + return HostConfig(*args, **kwargs) + + def create_networking_config(self, *args, **kwargs): + """ + Create a networking config dictionary to be used as the + ``networking_config`` parameter in :py:meth:`create`. + + Args: + endpoints_config (dict): A dictionary mapping network names to + endpoint configurations generated by + :py:meth:`create_endpoint_config`. + + Returns: + (dict) A networking config. + + Example: + + .. code-block:: python + + api.container.create_network('network1') + networking_config = api.container.create_networking_config({ + 'network1': api.container.create_endpoint_config() + }) + container = api.container.create( + img, command, networking_config=networking_config + ) + + """ + return NetworkingConfig(*args, **kwargs) + + def create_endpoint_config(self, *args, **kwargs): + """ + Create an endpoint config dictionary to be used with + :py:meth:`create_networking_config`. + + Args: + aliases (:py:class:`list`): A list of aliases for this endpoint. + Names in that list can be used within the network to reach the + container. Defaults to ``None``. 
+ links (:py:class:`list`): A list of links for this endpoint. + Containers declared in this list will be linked to this + container. Defaults to ``None``. + ipv4_address (str): The IP address of this container on the + network, using the IPv4 protocol. Defaults to ``None``. + ipv6_address (str): The IP address of this container on the + network, using the IPv6 protocol. Defaults to ``None``. + link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) + addresses. + + Returns: + (dict) An endpoint config. + + Example: + + .. code-block:: python + + endpoint_config = api.container.create_endpoint_config( + aliases=['web', 'app'], + links=['app_db'], + ipv4_address='132.65.0.123' + ) + + """ + return EndpointConfig(self.api_client.version, *args, **kwargs) + + async def remove(self, container, v=False, link=False, force=False): + """ + Remove a container. Similar to the ``docker rm`` command. + + Args: + container (str): The container to remove + v (bool): Remove the volumes associated with the container + link (bool): Remove the specified link and not the underlying + container + force (bool): Force the removal of a running container (uses + ``SIGKILL``) + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + params = { + 'v': v, + 'link': link, + 'force': force + } + + response = await self.api_client._query( + "containers/{id}".format(id=container), + method='DELETE', + params=params + ) + await response.release() + return + + async def inspect(self, container_id, size=False) -> Mapping[str, Any]: + params = { + 'size': size + } + response = await self.api_client._query_json( + "containers/{id}/json".format(id=container_id), + method='GET', + params=params + ) + return response + + async def logs(self, container_id, stdout=False, stderr=False, follow=False, **kwargs): + if stdout is False and stderr is False: + raise TypeError("Need one of stdout or stderr") + + params = { + "stdout": stdout, + "stderr": stderr, + "follow": follow, + } + params.update(kwargs) + + inspect_info = await self.inspect(container_id) + is_tty = inspect_info['Config']['Tty'] + + response = await self.api_client._query( + "containers/{id}/logs".format(id=container_id), + method='GET', + params=params, + ) + return await multiplexed_result(response, follow, is_tty=is_tty) + + async def prune(self, filters: Mapping = None) -> Mapping[str, Any]: + """ + Delete stopped containers + + Args: + filters (dict): Filters to process on the prune list. + + Returns: + (dict): A dict containing a list of deleted container IDs and + the amount of disk space reclaimed in bytes. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. 
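+
+        Example (illustrative sketch; assumes ``api`` is a connected
+        :py:class:`~aiodocker.api.client.APIClient` instance):
+
+        .. code-block:: python
+
+            # Remove stopped containers labelled environment=test.
+            result = await api.container.prune(
+                filters={"label": ["environment=test"]}
+            )
+            # The response follows the Docker Engine API, e.g. the
+            # "ContainersDeleted" and "SpaceReclaimed" keys.
+            print(result.get("SpaceReclaimed", 0))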
+ """ + params = {} + if filters: + params['filters'] = clean_filters(filters) + response = await self.api_client._query_json( + "containers/prune", + method='POST', + params=params + ) + return response + + async def restart(self, container_id, timeout=None): + params = {} + if timeout is not None: + params['t'] = timeout + response = await self.api_client._query( + "containers/{id}/restart".format(id=container_id), + method='POST', + params=params + ) + await response.release() + return + + async def start(self, container_id): + response = await self.api_client._query( + "containers/{}/start".format(container_id), + method='POST' + ) + await response.release() + return + + async def stop(self, container_id, **kwargs): + response = await self.api_client._query( + "containers/{id}/stop".format(id=container_id), + method='POST', + params=kwargs + ) + await response.release() + return + + async def wait(self, container_id, timeout=None, **kwargs): + data = await self.api_client._query_json( + "containers/{id}/wait".format(id=container_id), + method='POST', + params=kwargs, + timeout=timeout, + ) + return data diff --git a/aiodocker/api/image.py b/aiodocker/api/image.py new file mode 100644 index 00000000..b5152c1f --- /dev/null +++ b/aiodocker/api/image.py @@ -0,0 +1,331 @@ +import json +import tarfile +from typing import ( + Optional, Union, Any, + List, MutableMapping, Mapping, + BinaryIO, +) + +from ..jsonstream import json_stream_result +from ..multiplexed import multiplexed_result +from ..utils import identical, parse_result, clean_filters, compose_auth_header, clean_map + + +class DockerImageAPI(object): + def __init__(self, api_client): + self.api_client = api_client + + async def build(self, *, + remote: str=None, + fileobj: BinaryIO=None, + path_dockerfile: str=None, + tag: str=None, + quiet: bool=False, + nocache: bool=False, + buildargs: Mapping=None, + pull: bool=False, + rm: bool=True, + forcerm: bool=False, + labels: Mapping=None, + stream: bool=False, + encoding: str=None) -> Mapping: + """ + Build an image given a remote Dockerfile + or a file object with a Dockerfile inside + + Args: + path_dockerfile: path within the build context to the Dockerfile + remote: a Git repository URI or HTTP/HTTPS context URI + quiet: suppress verbose build output + nocache: do not use the cache when building the image + rm: remove intermediate containers after a successful build + pull: downloads any updates to the FROM image in Dockerfiles + encoding: set `Content-Encoding` for the file object your send + forcerm: always remove intermediate containers, even upon failure + labels: arbitrary key/value labels to set on the image + fileobj: a tar archive compressed or not + tag (str): A tag to add to the final image + buildargs (dict): A dictionary of build arguments + stream: + """ + + local_context = None + + headers = {} + + params = { + 't': tag, + 'rm': rm, + 'q': quiet, + 'pull': pull, + 'remote': remote, + 'nocache': nocache, + 'forcerm': forcerm, + 'dockerfile': path_dockerfile, + } + + if remote is None and fileobj is None: + raise ValueError("You need to specify either remote or fileobj") + + if fileobj and remote: + raise ValueError("You cannot specify both fileobj and remote") + + if fileobj and not encoding: + raise ValueError("You need to specify an encoding") + + if remote is None and fileobj is None: + raise ValueError("Either remote or fileobj needs to be provided.") + + if fileobj: + local_context = fileobj.read() + headers["content-type"] = "application/x-tar" + + if fileobj and 
encoding: + headers['Content-Encoding'] = encoding + + if buildargs: + params.update({'buildargs': json.dumps(buildargs)}) + + if labels: + params.update({'labels': json.dumps(labels)}) + + response = await self.api_client._query( + "build", + "POST", + params=clean_map(params), + headers=headers, + data=local_context + ) + + return await json_stream_result(response, stream=stream) + + async def history(self, image: str) -> Mapping: + """ + Show the history of an image. + + Args: + image (str): The image to show history for + + Returns: + (str): The history of the image + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + response = await self.api_client._query_json( + "images/{name}/history".format(name=image), + ) + return response + + async def inspect(self, image: str) -> Mapping[str, Any]: + """ + Get detailed information about an image. Similar to the ``docker + image inspect`` command. + + Args: + image (str): The image to inspect + + Returns: + (dict): Similar to the output of ``docker image inspect``, but as a + single dict + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + response = await self.api_client._query_json( + "images/{image}/json".format(image=image), + ) + return response + + async def list(self, name=None, all=False, digests=False, filters: Mapping=None) -> List[Mapping]: + """ + List images. Similar to the ``docker images`` command. + + Args: + name (str): Only show images belonging to the repository ``name`` + all (bool): Show intermediate image layers. By default, these are + filtered out. + digests (bool): Show digest information as a RepoDigests field on each image. + filters (dict): Filters to be processed on the image list. + Available filters: + - ``dangling`` (bool) + - ``label`` (str): format either ``key`` or ``key=value`` + - before=([:], or ) + - reference=([:]) + - since=([:], or ) + + Returns: + A dictionary. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + params = { + 'all': all, + 'digests': digests + } + if name: + params['filter'] = name + if filters: + params['filters'] = clean_filters(filters) + response = await self.api_client._query_json( + "images/json", "GET", + params=params, + ) + return response + + async def pull(self, name: str, *, + auth_config: Optional[Union[MutableMapping, str, bytes]]=None, + tag: str=None, + repo: str=None, + stream: bool=False, + platform=None) -> Mapping: + """ + Similar to `docker pull`, pull an image locally + + Args: + name: name of the image to pull + repo: repository name given to an image when it is imported + tag: if empty when pulling an image all tags + for the given image to be pulled + auth_config: special {'auth': base64} pull private repo + stream: + platform (str): Platform in the format ``os[/arch[/variant]]`` + """ + params = { + 'fromImage': name, + } + headers = {} + if repo: + params['repo'] = repo + if tag: + params['tag'] = tag + if auth_config is not None: + registry, has_registry_host, _ = name.partition('/') + if not has_registry_host: + raise ValueError('Image should have registry host ' + 'when auth information is provided') + # TODO: assert registry == repo? 
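+            # The registry host parsed from the image name scopes the supplied
+            # credentials; they are sent as the X-Registry-Auth header.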
+ headers['X-Registry-Auth'] = compose_auth_header(auth_config, registry) + response = await self.api_client._query( + "images/create", + "POST", + params=clean_map(params), + headers=headers, + ) + return await json_stream_result(response, stream=stream) + + async def push(self, repository: str, *, + auth_config: Union[MutableMapping, str, bytes]=None, + tag: str=None, + stream: bool=False) -> Mapping: + """ + Push an image or a repository to the registry. Similar to the ``docker + push`` command. + + Args: + repository (str): The repository to push to + tag (str): An optional tag to push + stream (bool): Stream the output as a blocking generator + auth_config (dict): Override the credentials that + :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for + this request. ``auth_config`` should contain the ``username`` + and ``password`` keys to be valid. + decode (bool): Decode the JSON data from the server into dicts. + Only applies with ``stream=True`` + + Returns: + (generator or str): The output from the server. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + + Example: + >>> for line in client.image.push('yourname/app', stream=True): + ... print line + {"status":"Pushing repository yourname/app (1 tags)"} + {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"} + {"status":"Image already pushed, skipping","progressDetail":{}, + "id":"511136ea3c5a"} + ... + + """ + params = {} + headers = { + # Anonymous push requires a dummy auth header. + 'X-Registry-Auth': 'placeholder', + } + if tag: + params['tag'] = tag + if auth_config is not None: + registry, has_registry_host, _ = repository.partition('/') + if not has_registry_host: + raise ValueError('Image should have registry host ' + 'when auth information is provided') + headers['X-Registry-Auth'] = compose_auth_header(auth_config, registry) + response = await self.api_client._query( + "images/{name}/push".format(name=repository), + "POST", + params=params, + headers=headers, + ) + return await json_stream_result(response, stream=stream) + + async def remove(self, name: str, *, force: bool=False, + noprune: bool=False) -> List: + """ + Remove an image along with any untagged parent + images that were referenced by that image + + Args: + name: name/id of the image to delete + force: remove the image even if it is being used + by stopped containers or has other tags + noprune: don't delete untagged parent images + + Returns: + List of deleted images + """ + params = {'force': force, 'noprune': noprune} + response = await self.api_client._query_json( + "images/{name}".format(name=name), + "DELETE", + params=params, + ) + return response + + async def tag(self, image: str, repository: str, *, tag: str=None) -> bool: + """ + Tag an image into a repository. Similar to the ``docker tag`` command. + + Args: + image (str): The image to tag + repository (str): The repository to set for the tag + tag (str): The tag name + + Returns: + (bool): ``True`` if successful + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. 
+ + Example: + + >>> client.image.tag('ubuntu', 'localhost:5000/ubuntu', 'latest') + """ + params = {"repo": repository} + + if tag: + params["tag"] = tag + + await self.api_client._query_json( + "images/{image}/tag".format(image=image), + "POST", + params=params, + ) + return True diff --git a/aiodocker/api/system.py b/aiodocker/api/system.py new file mode 100644 index 00000000..60239033 --- /dev/null +++ b/aiodocker/api/system.py @@ -0,0 +1,20 @@ + + +class DockerSystemAPI(object): + def __init__(self, api_client): + self.api_client = api_client + + async def info(self): + """ + Display system-wide information. Identical to the ``docker info`` + command. + + Returns: + (dict): The info as a dict + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + data = await self.api_client._query_json("info") + return data diff --git a/aiodocker/client.py b/aiodocker/client.py new file mode 100644 index 00000000..9a213089 --- /dev/null +++ b/aiodocker/client.py @@ -0,0 +1,45 @@ +from .api.client import APIClient +from .models.containers import ContainerCollection +from .models.images import ImageCollection + + +class DockerClient(object): + def __init__(self, *args, **kwargs): + self.api = APIClient(*args, **kwargs) + + @property + def containers(self): + """ + An object for managing containers on the server. See the + :doc:`containers documentation ` for full details. + """ + return ContainerCollection(client=self) + + @property + def images(self): + """ + An object for managing images on the server. See the + :doc:`images documentation ` for full details. + """ + return ImageCollection(client=self) + + async def close(self): + await self.api.close() + + async def version(self): + return await self.api.version() + version.__doc__ = APIClient.version.__doc__ + + async def info(self): + """ + Display system-wide information. Identical to the ``docker info`` + command. + + Returns: + (dict): The info as a dict + + Raises: + :py:class:`docker.errors.APIError` + If the server returns an error. + """ + return await self.api.system.info() diff --git a/aiodocker/errors.py b/aiodocker/errors.py new file mode 100644 index 00000000..24e3f8f8 --- /dev/null +++ b/aiodocker/errors.py @@ -0,0 +1,119 @@ +import json + + +class DockerException(Exception): + """ + A base class from which all other exceptions inherit. + + If you want to catch all errors that the Docker SDK might raise, + catch this base exception. + """ + + +async def create_api_error_from_response(response): + """ + Create a suitable APIError from ClientResponse. + """ + what = await response.read() + content_type = response.headers.get('content-type', '') + response.close() + if content_type == 'application/json': + explanation = json.loads(what.decode('utf8'))['message'] + else: + explanation = what.decode('utf8') + cls = APIError + if response.status == 404: + if explanation and ('No such image' in str(explanation) or + 'not found: does not exist or no pull access' + in str(explanation) or + 'repository does not exist' in str(explanation)): + cls = ImageNotFound + else: + cls = NotFound + raise cls(response=response, explanation=explanation) + + +class APIError(DockerException): + """ + An HTTP error from the API. 
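+
+    Carries the original :py:class:`aiohttp.ClientResponse` as ``response``
+    and the server-supplied error message as ``explanation``.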
+ """ + def __init__(self, response=None, explanation=None): + self.response = response + self.explanation = explanation + + def __str__(self): + message = super(APIError, self).__str__() + + if self.is_client_error(): + message = '{0} Client Error: {1}'.format(self.response.status, self.response.reason) + + elif self.is_server_error(): + message = '{0} Server Error: {1}'.format(self.response.status, self.response.reason) + + if self.explanation: + message = '{0} ("{1}")'.format(message, self.explanation) + + return message + + @property + def status_code(self): + if self.response is not None: + return self.response.status + + def is_client_error(self): + if self.status_code is None: + return False + return 400 <= self.status_code < 500 + + def is_server_error(self): + if self.status_code is None: + return False + return 500 <= self.status_code < 600 + + +class NotFound(APIError): + pass + + +class ImageNotFound(NotFound): + pass + + +class InvalidVersion(DockerException): + pass + + +class ContainerError(DockerException): + """ + Represents a container that has exited with a non-zero exit code. + """ + def __init__(self, container, exit_status, command, image, stderr): + self.container = container + self.exit_status = exit_status + self.command = command + self.image = image + self.stderr = stderr + + err = ": {}".format(stderr) if stderr is not None else "" + msg = ("Command '{}' in image '{}' returned non-zero exit " + "status {}{}").format(command, image, exit_status, err) + + super(ContainerError, self).__init__(msg) + + +class BuildError(DockerException): + def __init__(self, reason, build_log): + super(BuildError, self).__init__(reason) + self.msg = reason + self.build_log = build_log + + +def create_unexpected_kwargs_error(name, kwargs): + quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)] + text = ["{}() ".format(name)] + if len(quoted_kwargs) == 1: + text.append("got an unexpected keyword argument ") + else: + text.append("got unexpected keyword arguments ") + text.append(', '.join(quoted_kwargs)) + return TypeError(''.join(text)) diff --git a/aiodocker/models/__init__.py b/aiodocker/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/aiodocker/models/containers.py b/aiodocker/models/containers.py new file mode 100644 index 00000000..de24b0d9 --- /dev/null +++ b/aiodocker/models/containers.py @@ -0,0 +1,452 @@ +import copy +import ntpath + +from .resource import Collection, Model +from ..errors import ImageNotFound, ContainerError, APIError, create_unexpected_kwargs_error +from ..exceptions import DockerContainerError +from ..types import HostConfig +from .images import Image + + +class Container(Model): + + @property + def name(self): + """ + The name of the container. + """ + if self.attrs.get('Name') is not None: + return self.attrs['Name'].lstrip('/') + + @property + def labels(self): + """ + The labels of a container as dictionary. + """ + result = self.attrs['Config'].get('Labels') + return result or {} + + @property + def status(self): + """ + The status of the container. For example, ``running``, or ``exited``. + """ + return self.attrs['State']['Status'] + + async def image(self): + """ + The image of the container. + """ + image_id = self.attrs['Image'] + if image_id is None: + return None + return await self.client.api.image.get(image_id.split(':')[1]) + + async def logs(self, **kwargs): + """ + Get logs from this container. Similar to the ``docker logs`` command. 
+ + The ``stream`` parameter makes the ``logs`` function return a blocking + generator you can iterate over to retrieve log output as it happens. + + Args: + stdout (bool): Get ``STDOUT`` + stderr (bool): Get ``STDERR`` + stream (bool): Stream the response + timestamps (bool): Show timestamps + tail (str or int): Output specified number of lines at the end of + logs. Either an integer of number of lines or the string + ``all``. Default ``all`` + since (datetime or int): Show logs since a given datetime or + integer epoch (in seconds) + follow (bool): Follow log output + until (datetime or int): Show logs that occurred before the given + datetime or integer epoch (in seconds) + + Returns: + (generator or str): Logs from the container. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.logs(self.id, **kwargs) + + async def remove(self, v=False, link=False, force=False): + """ + Remove this container. Similar to the ``docker rm`` command. + + Args: + v (bool): Remove the volumes associated with the container + link (bool): Remove the specified link and not the underlying + container + force (bool): Force the removal of a running container (uses + ``SIGKILL``) + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.remove(self.id, v=v, link=link, force=force) + + async def restart(self, timeout=None): + """ + Restart this container. Similar to the ``docker restart`` command. + + Args: + timeout (int): Number of seconds to try to stop for before killing + the container. Once killed it will then be restarted. Default + is 10 seconds. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.restart(self.id, timeout) + + async def start(self, **kwargs): + """ + Start this container. Similar to the ``docker start`` command, but + doesn't support attach options. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.start(self.id, **kwargs) + + def stop(self, **kwargs): + """ + Stops a container. Similar to the ``docker stop`` command. + + Args: + timeout (int): Timeout in seconds to wait for the container to + stop before sending a ``SIGKILL``. Default: 10 + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return self.client.api.container.stop(self.id, **kwargs) + + async def wait(self, **kwargs): + """ + Block until the container stops, then return its exit code. Similar to + the ``docker wait`` command. + + Args: + timeout (int): Request timeout + condition (str): Wait until a container state reaches the given + condition, either ``not-running`` (default), ``next-exit``, + or ``removed`` + + Returns: + (dict): The API's response as a Python dictionary, including + the container's exit code under the ``StatusCode`` attribute. + + Raises: + :py:class:`aiohttp.ServerTimeoutError` + If the timeout is exceeded. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.wait(self.id, **kwargs) + + +class ContainerCollection(Collection): + model = Container + + async def get(self, container_id): + """ + Get a container by name or ID. + + Args: + container_id (str): Container name or ID. + + Returns: + A :py:class:`Container` object. 
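+
+        Example (illustrative; ``client`` is assumed to be a
+        :py:class:`~aiodocker.client.DockerClient` and ``"my-redis"`` an
+        existing container name):
+
+        .. code-block:: python
+
+            container = await client.containers.get("my-redis")
+            print(container.name, container.status)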
+ + Raises: + :py:class:`aiodocker.errors.NotFound` + If the container does not exist. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + resp = await self.client.api.container.inspect(container_id) + return self.prepare_model(resp) + + async def create(self, image, command=None, **kwargs): + """ + Create a container without starting it. Similar to ``docker create``. + + Takes the same arguments as :py:meth:`run`, except for ``stdout``, + ``stderr``, and ``remove``. + + Returns: + A :py:class:`Container` object. + + Raises: + :py:class:`aiodocker.errors.ImageNotFound` + If the specified image does not exist. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + if isinstance(image, Image): + image = image.id + kwargs['image'] = image + kwargs['command'] = command + kwargs['version'] = self.client.api.api_version + create_kwargs = _create_container_args(kwargs) + resp = await self.client.api.container.create(**create_kwargs) + return await self.get(resp['Id']) + + async def list(self, all=False, limit=-1, filters=None): + """ + List containers. Similar to the ``docker ps`` command. + + Args: + all (bool): Show all containers. Only running containers are shown + by default + limit (int): Show `limit` last created containers, include + non-running ones + filters (dict): Filters to be processed on the image list. + Available filters: + + - `exited` (int): Only containers with specified exit code + - `status` (str): One of ``restarting``, ``running``, + ``paused``, ``exited`` + - `label` (str): format either ``"key"`` or ``"key=value"`` + - `id` (str): The id of the container. + - `name` (str): The name of the container. + - `ancestor` (str): Filter by container ancestor. Format of + ``[:tag]``, ````, or + ````. + - `before` (str): Only containers created before a particular + container. Give the container name or id. + - `since` (str): Only containers created after a particular + container. Give container name or id. + + A comprehensive list can be found in the documentation for + `docker ps + `_. + + Returns: + (list of :py:class:`Container`) + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + resp = await self.client.api.container.list(all=all, limit=limit, filters=filters) + return [await self.get(r['Id']) for r in resp] + + async def run(self, image, command=None, stdout=True, stderr=False, + remove=False, **kwargs): + """ + Create and start a container. + + If container.start() will raise an error the exception will contain + a `container_id` attribute with the id of the container. + """ + if isinstance(image, Image): + image = image.id + stream = kwargs.pop('stream', False) + detach = kwargs.pop('detach', False) + platform = kwargs.pop('platform', None) + + if detach and remove: + kwargs["auto_remove"] = True + + if kwargs.get('network') and kwargs.get('network_mode'): + raise RuntimeError( + 'The options "network" and "network_mode" can not be used ' + 'together.' 
+ ) + + try: + container = await self.create(image=image, command=command, + detach=detach, **kwargs) + except ImageNotFound: + await self.client.images.pull(image, platform=platform) + container = await self.create(image=image, command=command, + detach=detach, **kwargs) + + try: + await container.start() + except APIError as err: + raise DockerContainerError( + err.status_code, + { + "message": err.explanation + }, + container.id + ) + + return container + + async def prune(self, filters=None): + """ + Delete stopped containers + + Args: + filters (dict): Filters to process on the prune list. + + Returns: + (dict): A dict containing a list of deleted container IDs and + the amount of disk space reclaimed in bytes. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.container.prune(filters=filters) + + +# kwargs to copy straight from run to create +RUN_CREATE_KWARGS = [ + 'command', + 'detach', + 'domainname', + 'entrypoint', + 'environment', + 'healthcheck', + 'hostname', + 'image', + 'labels', + 'mac_address', + 'name', + 'network_disabled', + 'stdin_open', + 'stop_signal', + 'tty', + 'user', + 'volume_driver', + 'working_dir', +] + +# kwargs to copy straight from run to host_config +RUN_HOST_CONFIG_KWARGS = [ + 'auto_remove', + 'blkio_weight_device', + 'blkio_weight', + 'cap_add', + 'cap_drop', + 'cgroup_parent', + 'cpu_count', + 'cpu_percent', + 'cpu_period', + 'cpu_quota', + 'cpu_shares', + 'cpuset_cpus', + 'cpuset_mems', + 'cpu_rt_period', + 'cpu_rt_runtime', + 'device_cgroup_rules', + 'device_read_bps', + 'device_read_iops', + 'device_write_bps', + 'device_write_iops', + 'devices', + 'dns_opt', + 'dns_search', + 'dns', + 'extra_hosts', + 'group_add', + 'init', + 'init_path', + 'ipc_mode', + 'isolation', + 'kernel_memory', + 'links', + 'log_config', + 'lxc_conf', + 'mem_limit', + 'mem_reservation', + 'mem_swappiness', + 'memswap_limit', + 'mounts', + 'nano_cpus', + 'network_mode', + 'oom_kill_disable', + 'oom_score_adj', + 'pid_mode', + 'pids_limit', + 'privileged', + 'publish_all_ports', + 'read_only', + 'restart_policy', + 'security_opt', + 'shm_size', + 'storage_opt', + 'sysctls', + 'tmpfs', + 'ulimits', + 'userns_mode', + 'version', + 'volumes_from', + 'runtime' +] + + +def _create_container_args(kwargs): + """ + Convert arguments to create() to arguments to create_container(). 
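+
+    Keys listed in ``RUN_CREATE_KWARGS`` are passed through unchanged, keys in
+    ``RUN_HOST_CONFIG_KWARGS`` are gathered into a :py:class:`HostConfig`, and
+    ``ports``, ``volumes`` and ``network`` are translated into
+    ``port_bindings``, ``binds`` and a networking config respectively. Any
+    keyword left over raises a ``TypeError``.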
+ """ + # Copy over kwargs which can be copied directly + create_kwargs = {} + for key in copy.copy(kwargs): + if key in RUN_CREATE_KWARGS: + create_kwargs[key] = kwargs.pop(key) + host_config_kwargs = {} + for key in copy.copy(kwargs): + if key in RUN_HOST_CONFIG_KWARGS: + host_config_kwargs[key] = kwargs.pop(key) + + # Process kwargs which are split over both create and host_config + ports = kwargs.pop('ports', {}) + if ports: + host_config_kwargs['port_bindings'] = ports + + volumes = kwargs.pop('volumes', {}) + if volumes: + host_config_kwargs['binds'] = volumes + + network = kwargs.pop('network', None) + if network: + create_kwargs['networking_config'] = { + network: None + } + host_config_kwargs['network_mode'] = network + + # All kwargs should have been consumed by this point, so raise + # error if any are left + if kwargs: + raise create_unexpected_kwargs_error('run', kwargs) + + create_kwargs['host_config'] = HostConfig(**host_config_kwargs) + + # Fill in any kwargs which need processing by create_host_config first + port_bindings = create_kwargs['host_config'].get('PortBindings') + if port_bindings: + # sort to make consistent for tests + create_kwargs['ports'] = [tuple(p.split('/', 1)) + for p in sorted(port_bindings.keys())] + if volumes: + if isinstance(volumes, dict): + create_kwargs['volumes'] = [ + v.get('bind') for v in volumes.values() + ] + else: + create_kwargs['volumes'] = [ + _host_volume_from_bind(v) for v in volumes + ] + return create_kwargs + + +def _host_volume_from_bind(bind): + drive, rest = ntpath.splitdrive(bind) + bits = rest.split(':', 1) + if len(bits) == 1 or bits[1] in ('ro', 'rw'): + return drive + bits[0] + else: + return bits[1].rstrip(':ro').rstrip(':rw') diff --git a/aiodocker/models/images.py b/aiodocker/models/images.py new file mode 100644 index 00000000..d3bb29eb --- /dev/null +++ b/aiodocker/models/images.py @@ -0,0 +1,341 @@ +import re + +from .resource import Model, Collection +from ..errors import BuildError +from ..utils.utils import parse_repository_tag, clean_map + + +class Image(Model): + """ + An image on the server. + """ + def __repr__(self): + return "<{}: '{}'>".format(self.__class__.__name__, "', '".join(self.tags)) + + @property + def labels(self): + """ + The labels of an image as dictionary. + """ + result = self.attrs['Config'].get('Labels') + return result or {} + + @property + def short_id(self): + """ + The ID of the image truncated to 10 characters, plus the ``sha256:`` + prefix. + """ + if self.id.startswith('sha256:'): + return self.id[:17] + return self.id[:10] + + @property + def tags(self): + """ + The image's tags. + """ + tags = self.attrs.get('RepoTags') + if tags is None: + tags = [] + return [tag for tag in tags if tag != ':'] + + async def history(self): + """ + Show the history of an image. + + Returns: + (str): The history of the image. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return await self.client.api.image.history(self.id) + + # def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE): + # """ + # Get a tarball of an image. Similar to the ``docker save`` command. + # + # Args: + # chunk_size (int): The number of bytes returned by each iteration + # of the generator. If ``None``, data will be streamed as it is + # received. Default: 2 MB + # + # Returns: + # (generator): A stream of raw archive data. + # + # Raises: + # :py:class:`docker.errors.APIError` + # If the server returns an error. 
+ # + # Example: + # + # >>> image = cli.get_image("busybox:latest") + # >>> f = open('/tmp/busybox-latest.tar', 'w') + # >>> for chunk in image: + # >>> f.write(chunk) + # >>> f.close() + # """ + # return self.client.api.get_image(self.id, chunk_size) + + def tag(self, repository, tag=None): + """ + Tag this image into a repository. Similar to the ``docker tag`` + command. + + Args: + repository (str): The repository to set for the tag + tag (str): The tag name + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + + Returns: + (bool): ``True`` if successful + """ + return self.client.api.image.tag(self.id, repository, tag=tag) + + +class ImageCollection(Collection): + model = Image + + async def build(self, **kwargs): + """ + Build an image and return it. Similar to the ``docker build`` + command. Either ``path`` or ``fileobj`` must be set. + + If you have a tar file for the Docker build context (including a + Dockerfile) already, pass a readable file-like object to ``fileobj`` + and also pass ``custom_context=True``. If the stream is compressed + also, set ``encoding`` to the correct value (e.g ``gzip``). + + If you want to get the raw output of the build, use the + :py:meth:`~docker.api.build.BuildApiMixin.build` method in the + low-level API. + + Args: + path (str): Path to the directory containing the Dockerfile + fileobj: A file object to use as the Dockerfile. (Or a file-like + object) + tag (str): A tag to add to the final image + quiet (bool): Whether to return the status + nocache (bool): Don't use the cache when set to ``True`` + rm (bool): Remove intermediate containers. The ``docker build`` + command now defaults to ``--rm=true``, but we have kept the old + default of `False` to preserve backward compatibility + timeout (int): HTTP timeout + custom_context (bool): Optional if using ``fileobj`` + encoding (str): The encoding for a stream. Set to ``gzip`` for + compressing + pull (bool): Downloads any updates to the FROM image in Dockerfiles + forcerm (bool): Always remove intermediate containers, even after + unsuccessful builds + dockerfile (str): path within the build context to the Dockerfile + buildargs (dict): A dictionary of build arguments + container_limits (dict): A dictionary of limits applied to each + container created by the build process. Valid keys: + + - memory (int): set memory limit for build + - memswap (int): Total memory (memory + swap), -1 to disable + swap + - cpushares (int): CPU shares (relative weight) + - cpusetcpus (str): CPUs in which to allow execution, e.g., + ``"0-3"``, ``"0,1"`` + shmsize (int): Size of `/dev/shm` in bytes. The size must be + greater than 0. If omitted the system uses 64MB + labels (dict): A dictionary of labels to set on the image + cache_from (list): A list of images used for build cache + resolution + target (str): Name of the build-stage to build in a multi-stage + Dockerfile + network_mode (str): networking mode for the run commands during + build + squash (bool): Squash the resulting images layers into a + single layer. + extra_hosts (dict): Extra hosts to add to /etc/hosts in building + containers, as a mapping of hostname to IP address. + platform (str): Platform in the format ``os[/arch[/variant]]``. + + Returns: + (tuple): The first item is the :py:class:`Image` object for the + image that was build. The second item is a generator of the + build logs as JSON-decoded objects. + + Raises: + :py:class:`docker.errors.BuildError` + If there is an error during the build. 
+ :py:class:`docker.errors.APIError` + If the server returns any other error. + ``TypeError`` + If neither ``path`` nor ``fileobj`` is specified. + """ + json_stream = await self.client.api.image.build(**kwargs) + if isinstance(json_stream, str): + return self.get(json_stream) + last_event = None + image_id = None + for chunk in json_stream: + if 'error' in chunk: + raise BuildError(chunk['error'], json_stream) + if 'stream' in chunk: + match = re.search( + r'(^Successfully built |sha256:)([0-9a-f]+)$', + chunk['stream'] + ) + if match: + image_id = match.group(2) + last_event = chunk + if image_id: + return await self.get(image_id), json_stream + raise BuildError(last_event or 'Unknown', json_stream) + + async def get(self, name): + """ + Gets an image. + + Args: + name (str): The name of the image. + + Returns: + (:py:class:`Image`): The image. + + Raises: + :py:class:`aiodocker.errors.ImageNotFound` + If the image does not exist. + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + return self.prepare_model(await self.client.api.image.inspect(name)) + + async def list(self, name=None, all=False, digests=False, filters=None): + """ + List images on the server. + + Args: + name (str): Only show images belonging to the repository ``name`` + all (bool): Show intermediate image layers. By default, these are + filtered out. + digests (bool): Show digest information as a RepoDigests field on each image. + filters (dict): Filters to be processed on the image list. + Available filters: + - ``dangling`` (bool) + - ``label`` (str): format either ``key`` or ``key=value`` + - before=([:], or ) + - reference=([:]) + - since=([:], or ) + + Returns: + (list of :py:class:`Image`): The images. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + """ + resp = await self.client.api.image.list(name=name, all=all, digests=digests, filters=filters) + return [await self.get(r["Id"]) for r in resp] + + async def create(self, attrs=None): + pass + + async def pull(self, repository, tag=None, **kwargs): + """ + Pull an image of the given name and return it. Similar to the + ``docker pull`` command. + If no tag is specified, all tags from that repository will be + pulled. + + If you want to get the raw pull output, use the + :py:meth:`~aiodocker.api.image.pull` method in the + low-level API. + + Args: + repository (str): The repository to pull + tag (str): The tag to pull + auth_config (dict): Override the credentials that + :py:meth:`~aiodocker.client.DockerClient.login` has set for + this request. ``auth_config`` should contain the ``username`` + and ``password`` keys to be valid. + + Returns: + (:py:class:`Image` or list): The image that has been pulled. + If no ``tag`` was specified, the method will return a list + of :py:class:`Image` objects belonging to this repository. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + + Example: + + .. 
code-block:: python + + >>> # Pull the image tagged `latest` in the busybox repo + >>> image = client.images.pull('busybox:latest') + + >>> # Pull all tags in the busybox repo + >>> images = client.images.pull('busybox') + """ + if not tag: + repository, tag = parse_repository_tag(repository) + + await self.client.api.image.pull(repository, tag=tag, **kwargs) + if tag: + return await self.get('{0}{2}{1}'.format( + repository, tag, '@' if tag.startswith('sha256:') else ':' + )) + return await self.list(repository) + + async def push(self, repository, tag=None, **kwargs): + """ + Push an image or a repository to the registry. Similar to the ``docker + push`` command. + + Args: + repository (str): The repository to push to + tag (str): An optional tag to push + stream (bool): Stream the output as a blocking generator + auth_config (dict): Override the credentials that + :py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for + this request. ``auth_config`` should contain the ``username`` + and ``password`` keys to be valid. + decode (bool): Decode the JSON data from the server into dicts. + Only applies with ``stream=True`` + + Returns: + (generator or str): The output from the server. + + Raises: + :py:class:`aiodocker.errors.APIError` + If the server returns an error. + + Example: + >>> for line in client.images.push('yourname/app', stream=True): + ... print line + {"status":"Pushing repository yourname/app (1 tags)"} + {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"} + {"status":"Image already pushed, skipping","progressDetail":{}, + "id":"511136ea3c5a"} + ... + + """ + return await self.client.api.image.push(repository, tag=tag, **kwargs) + + async def remove(self, name: str, force: bool = False, noprune: bool = False): + """ + Remove an image along with any untagged parent + images that were referenced by that image + + Args: + name: name/id of the image to delete + force: remove the image even if it is being used + by stopped containers or has other tags + noprune: don't delete untagged parent images + + Returns: + List of deleted images + """ + return await self.client.api.image.remove(name, force=force, noprune=noprune) + + diff --git a/aiodocker/models/resource.py b/aiodocker/models/resource.py new file mode 100644 index 00000000..121b80b5 --- /dev/null +++ b/aiodocker/models/resource.py @@ -0,0 +1,92 @@ + +class Model(object): + """ + A base class for representing a single object on the server. + """ + id_attribute = 'Id' + + def __init__(self, attrs=None, client=None, collection=None): + #: A client pointing at the server that this object is on. + self.client = client + + #: The collection that this model is part of. + self.collection = collection + + #: The raw representation of this object from the API + self.attrs = attrs + if self.attrs is None: + self.attrs = {} + + def __repr__(self): + return "<{}: {}>".format(self.__class__.__name__, self.short_id) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.id == other.id + + def __hash__(self): + return hash("{}:{}".format(self.__class__.__name__, self.id)) + + @property + def id(self): + """ + The ID of the object. + """ + return self.attrs.get(self.id_attribute) + + @property + def short_id(self): + """ + The ID of the object, truncated to 10 characters. + """ + return self.id[:10] + + async def reload(self): + """ + Load this object from the server again and update ``attrs`` with the + new data. 
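Taken together, the ImageCollection methods above give a docker-py-style high-level interface on top of the asynchronous low-level API. A hedged usage sketch follows; it assumes this branch of aiodocker is installed, that DockerClient() needs no arguments, and that the collection is exposed as client.images as the docstrings suggest. The registry host is a placeholder.

import asyncio

from aiodocker import DockerClient   # assumes this branch of aiodocker is installed

async def main():
    client = DockerClient()
    try:
        # Pull one tag and inspect the resulting Image model.
        image = await client.images.pull('busybox:latest')
        print(image.short_id, image.tags)

        # Re-tag and push the new reference (registry host is a placeholder).
        await image.tag('registry.example.com/busybox', tag='mirror')
        await client.images.push('registry.example.com/busybox', tag='mirror')

        # List local images.
        for img in await client.images.list():
            print(img.short_id, img.labels)
    finally:
        await client.api.close()   # close the underlying APIClient session

asyncio.get_event_loop().run_until_complete(main())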
+ """ + new_model = self.collection.get(self.id) + self.attrs = new_model.attrs + + +class Collection(object): + """ + A base class for representing all objects of a particular type on the + server. + """ + + #: The type of object this collection represents, set by subclasses + model = None + + def __init__(self, client=None): + #: The client pointing at the server that this collection of objects + #: is on. + self.client = client + + def __call__(self, *args, **kwargs): + raise TypeError( + "'{}' object is not callable." + "maybe try aiodocker.APIClient." + .format(self.__class__.__name__)) + + async def list(self): + raise NotImplementedError + + async def get(self, key): + raise NotImplementedError + + async def create(self, attrs=None): + raise NotImplementedError + + def prepare_model(self, attrs): + """ + Create a model from a set of attributes. + """ + if isinstance(attrs, Model): + attrs.client = self.client + attrs.collection = self + return attrs + elif isinstance(attrs, dict): + return self.model(attrs=attrs, client=self.client, collection=self) + else: + raise Exception("Can't create {} from {}".format(self.model.__name__, attrs)) diff --git a/aiodocker/types/__init__.py b/aiodocker/types/__init__.py new file mode 100644 index 00000000..5b26a9a5 --- /dev/null +++ b/aiodocker/types/__init__.py @@ -0,0 +1,4 @@ +# flake8: noqa +from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit +from .healthcheck import Healthcheck +from .networks import NetworkingConfig, EndpointConfig \ No newline at end of file diff --git a/aiodocker/types/base.py b/aiodocker/types/base.py new file mode 100644 index 00000000..42aad7e0 --- /dev/null +++ b/aiodocker/types/base.py @@ -0,0 +1,6 @@ + + +class DictType(dict): + def __init__(self, init): + for k, v in init.items(): + self[k] = v diff --git a/aiodocker/types/containers.py b/aiodocker/types/containers.py new file mode 100644 index 00000000..7f2768a2 --- /dev/null +++ b/aiodocker/types/containers.py @@ -0,0 +1,513 @@ + +from .. 
import errors +from ..utils.utils import ( + convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds, + format_environment, format_extra_hosts, normalize_links, parse_bytes, + parse_devices, split_command, +) +from .base import DictType +from .healthcheck import Healthcheck + + +class LogConfigTypesEnum(object): + _values = ( + 'json-file', + 'syslog', + 'journald', + 'gelf', + 'fluentd', + 'none' + ) + JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values + + +class LogConfig(DictType): + types = LogConfigTypesEnum + + def __init__(self, **kwargs): + log_driver_type = kwargs.get('type', kwargs.get('Type')) + config = kwargs.get('config', kwargs.get('Config')) or {} + + if config and not isinstance(config, dict): + raise ValueError("LogConfig.config must be a dictionary") + + super(LogConfig, self).__init__({ + 'Type': log_driver_type, + 'Config': config + }) + + @property + def type(self): + return self['Type'] + + @type.setter + def type(self, value): + self['Type'] = value + + @property + def config(self): + return self['Config'] + + def set_config_value(self, key, value): + self.config[key] = value + + def unset_config(self, key): + if key in self.config: + del self.config[key] + + +class Ulimit(DictType): + def __init__(self, **kwargs): + name = kwargs.get('name', kwargs.get('Name')) + soft = kwargs.get('soft', kwargs.get('Soft')) + hard = kwargs.get('hard', kwargs.get('Hard')) + if not isinstance(name, str): + raise ValueError("Ulimit.name must be a string") + if soft and not isinstance(soft, int): + raise ValueError("Ulimit.soft must be an integer") + if hard and not isinstance(hard, int): + raise ValueError("Ulimit.hard must be an integer") + super(Ulimit, self).__init__({ + 'Name': name, + 'Soft': soft, + 'Hard': hard + }) + + @property + def name(self): + return self['Name'] + + @name.setter + def name(self, value): + self['Name'] = value + + @property + def soft(self): + return self.get('Soft') + + @soft.setter + def soft(self, value): + self['Soft'] = value + + @property + def hard(self): + return self.get('Hard') + + @hard.setter + def hard(self, value): + self['Hard'] = value + + +class HostConfig(dict): + def __init__(self, version, binds=None, port_bindings=None, + lxc_conf=None, publish_all_ports=False, links=None, + privileged=False, dns=None, dns_search=None, + volumes_from=None, network_mode=None, restart_policy=None, + cap_add=None, cap_drop=None, devices=None, extra_hosts=None, + read_only=None, pid_mode=None, ipc_mode=None, + security_opt=None, ulimits=None, log_config=None, + mem_limit=None, memswap_limit=None, mem_reservation=None, + kernel_memory=None, mem_swappiness=None, cgroup_parent=None, + group_add=None, cpu_quota=None, cpu_period=None, + blkio_weight=None, blkio_weight_device=None, + device_read_bps=None, device_write_bps=None, + device_read_iops=None, device_write_iops=None, + oom_kill_disable=False, shm_size=None, sysctls=None, + tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None, + cpuset_cpus=None, userns_mode=None, pids_limit=None, + isolation=None, auto_remove=False, storage_opt=None, + init=None, volume_driver=None, + cpu_count=None, cpu_percent=None, nano_cpus=None, + cpuset_mems=None, runtime=None, mounts=None, + cpu_rt_period=None, cpu_rt_runtime=None, + device_cgroup_rules=None): + + if mem_limit is not None: + self['Memory'] = parse_bytes(mem_limit) + + if memswap_limit is not None: + self['MemorySwap'] = parse_bytes(memswap_limit) + + if mem_reservation: + self['MemoryReservation'] = parse_bytes(mem_reservation) + + if 
kernel_memory: + self['KernelMemory'] = parse_bytes(kernel_memory) + + if mem_swappiness is not None: + if not isinstance(mem_swappiness, int): + raise host_config_type_error( + 'mem_swappiness', mem_swappiness, 'int' + ) + + self['MemorySwappiness'] = mem_swappiness + + if shm_size is not None: + if isinstance(shm_size, bytes): + shm_size = parse_bytes(shm_size) + + self['ShmSize'] = shm_size + + if pid_mode: + self['PidMode'] = pid_mode + + if ipc_mode: + self['IpcMode'] = ipc_mode + + if privileged: + self['Privileged'] = privileged + + if oom_kill_disable: + self['OomKillDisable'] = oom_kill_disable + + if oom_score_adj: + if not isinstance(oom_score_adj, int): + raise host_config_type_error( + 'oom_score_adj', oom_score_adj, 'int' + ) + self['OomScoreAdj'] = oom_score_adj + + if publish_all_ports: + self['PublishAllPorts'] = publish_all_ports + + if read_only is not None: + self['ReadonlyRootfs'] = read_only + + if dns_search: + self['DnsSearch'] = dns_search + + if network_mode: + self['NetworkMode'] = network_mode + elif network_mode is None: + self['NetworkMode'] = 'default' + + if restart_policy: + if not isinstance(restart_policy, dict): + raise host_config_type_error( + 'restart_policy', restart_policy, 'dict' + ) + + self['RestartPolicy'] = restart_policy + + if cap_add: + self['CapAdd'] = cap_add + + if cap_drop: + self['CapDrop'] = cap_drop + + if devices: + self['Devices'] = parse_devices(devices) + + if group_add: + self['GroupAdd'] = [str(grp) for grp in group_add] + + if dns is not None: + self['Dns'] = dns + + if dns_opt is not None: + self['DnsOptions'] = dns_opt + + if security_opt is not None: + if not isinstance(security_opt, list): + raise host_config_type_error( + 'security_opt', security_opt, 'list' + ) + + self['SecurityOpt'] = security_opt + + if sysctls: + if not isinstance(sysctls, dict): + raise host_config_type_error('sysctls', sysctls, 'dict') + self['Sysctls'] = {} + for k, v in sysctls.items(): + self['Sysctls'][k] = str(v) + + if volumes_from is not None: + if isinstance(volumes_from, str): + volumes_from = volumes_from.split(',') + + self['VolumesFrom'] = volumes_from + + if binds is not None: + self['Binds'] = convert_volume_binds(binds) + + if port_bindings is not None: + self['PortBindings'] = convert_port_bindings(port_bindings) + + if extra_hosts is not None: + if isinstance(extra_hosts, dict): + extra_hosts = format_extra_hosts(extra_hosts) + + self['ExtraHosts'] = extra_hosts + + if links is not None: + self['Links'] = normalize_links(links) + + if isinstance(lxc_conf, dict): + formatted = [] + for k, v in lxc_conf.items(): + formatted.append({'Key': k, 'Value': str(v)}) + lxc_conf = formatted + + if lxc_conf is not None: + self['LxcConf'] = lxc_conf + + if cgroup_parent is not None: + self['CgroupParent'] = cgroup_parent + + if ulimits is not None: + if not isinstance(ulimits, list): + raise host_config_type_error('ulimits', ulimits, 'list') + self['Ulimits'] = [] + for l in ulimits: + if not isinstance(l, Ulimit): + l = Ulimit(**l) + self['Ulimits'].append(l) + + if log_config is not None: + if not isinstance(log_config, LogConfig): + if not isinstance(log_config, dict): + raise host_config_type_error( + 'log_config', log_config, 'LogConfig' + ) + log_config = LogConfig(**log_config) + + self['LogConfig'] = log_config + + if cpu_quota: + if not isinstance(cpu_quota, int): + raise host_config_type_error('cpu_quota', cpu_quota, 'int') + self['CpuQuota'] = cpu_quota + + if cpu_period: + if not isinstance(cpu_period, int): + raise 
host_config_type_error('cpu_period', cpu_period, 'int') + self['CpuPeriod'] = cpu_period + + if cpu_shares: + if not isinstance(cpu_shares, int): + raise host_config_type_error('cpu_shares', cpu_shares, 'int') + + self['CpuShares'] = cpu_shares + + if cpuset_cpus: + self['CpusetCpus'] = cpuset_cpus + + if cpuset_mems: + if not isinstance(cpuset_mems, str): + raise host_config_type_error( + 'cpuset_mems', cpuset_mems, 'str' + ) + self['CpusetMems'] = cpuset_mems + + if cpu_rt_period: + if not isinstance(cpu_rt_period, int): + raise host_config_type_error( + 'cpu_rt_period', cpu_rt_period, 'int' + ) + self['CPURealtimePeriod'] = cpu_rt_period + + if cpu_rt_runtime: + if not isinstance(cpu_rt_runtime, int): + raise host_config_type_error( + 'cpu_rt_runtime', cpu_rt_runtime, 'int' + ) + self['CPURealtimeRuntime'] = cpu_rt_runtime + + if blkio_weight: + if not isinstance(blkio_weight, int): + raise host_config_type_error( + 'blkio_weight', blkio_weight, 'int' + ) + self["BlkioWeight"] = blkio_weight + + if blkio_weight_device: + if not isinstance(blkio_weight_device, list): + raise host_config_type_error( + 'blkio_weight_device', blkio_weight_device, 'list' + ) + self["BlkioWeightDevice"] = blkio_weight_device + + if device_read_bps: + if not isinstance(device_read_bps, list): + raise host_config_type_error( + 'device_read_bps', device_read_bps, 'list' + ) + self["BlkioDeviceReadBps"] = device_read_bps + + if device_write_bps: + if not isinstance(device_write_bps, list): + raise host_config_type_error( + 'device_write_bps', device_write_bps, 'list' + ) + self["BlkioDeviceWriteBps"] = device_write_bps + + if device_read_iops: + if not isinstance(device_read_iops, list): + raise host_config_type_error( + 'device_read_iops', device_read_iops, 'list' + ) + self["BlkioDeviceReadIOps"] = device_read_iops + + if device_write_iops: + if not isinstance(device_write_iops, list): + raise host_config_type_error( + 'device_write_iops', device_write_iops, 'list' + ) + self["BlkioDeviceWriteIOps"] = device_write_iops + + if tmpfs: + self["Tmpfs"] = convert_tmpfs_mounts(tmpfs) + + if userns_mode: + if userns_mode != "host": + raise host_config_value_error("userns_mode", userns_mode) + self['UsernsMode'] = userns_mode + + if pids_limit: + if not isinstance(pids_limit, int): + raise host_config_type_error('pids_limit', pids_limit, 'int') + self["PidsLimit"] = pids_limit + + if isolation: + if not isinstance(isolation, str): + raise host_config_type_error('isolation', isolation, 'string') + self['Isolation'] = isolation + + if auto_remove: + self['AutoRemove'] = auto_remove + + if storage_opt is not None: + self['StorageOpt'] = storage_opt + + if init is not None: + self['Init'] = init + + if volume_driver is not None: + self['VolumeDriver'] = volume_driver + + if cpu_count: + if not isinstance(cpu_count, int): + raise host_config_type_error('cpu_count', cpu_count, 'int') + self['CpuCount'] = cpu_count + + if cpu_percent: + if not isinstance(cpu_percent, int): + raise host_config_type_error('cpu_percent', cpu_percent, 'int') + self['CpuPercent'] = cpu_percent + + if nano_cpus: + if not isinstance(nano_cpus, int): + raise host_config_type_error('nano_cpus', nano_cpus, 'int') + self['NanoCpus'] = nano_cpus + + if runtime: + self['Runtime'] = runtime + + if mounts is not None: + self['Mounts'] = mounts + + if device_cgroup_rules is not None: + if not isinstance(device_cgroup_rules, list): + raise host_config_type_error( + 'device_cgroup_rules', device_cgroup_rules, 'list' + ) + self['DeviceCgroupRules'] = 
device_cgroup_rules + + +def host_config_type_error(param, param_value, expected): + error_msg = 'Invalid type for {0} param: expected {1} but found {2}' + return TypeError(error_msg.format(param, expected, type(param_value))) + + +def host_config_value_error(param, param_value): + error_msg = 'Invalid value for {0} param: {1}' + return ValueError(error_msg.format(param, param_value)) + + +class ContainerConfig(dict): + def __init__( + self, version, image, command, hostname=None, user=None, detach=False, + stdin_open=False, tty=False, ports=None, environment=None, + volumes=None, network_disabled=False, entrypoint=None, + working_dir=None, domainname=None, host_config=None, mac_address=None, + labels=None, stop_signal=None, networking_config=None, + healthcheck=None, stop_timeout=None, runtime=None + ): + + if isinstance(command, str): + command = split_command(command) + + if isinstance(entrypoint, str): + entrypoint = split_command(entrypoint) + + if isinstance(environment, dict): + environment = format_environment(environment) + + if isinstance(labels, list): + labels = dict((lbl, str('')) for lbl in labels) + + if isinstance(ports, list): + exposed_ports = {} + for port_definition in ports: + port = port_definition + proto = 'tcp' + if isinstance(port_definition, tuple): + if len(port_definition) == 2: + proto = port_definition[1] + port = port_definition[0] + exposed_ports['{0}/{1}'.format(port, proto)] = {} + ports = exposed_ports + + if isinstance(volumes, str): + volumes = [volumes, ] + + if isinstance(volumes, list): + volumes_dict = {} + for vol in volumes: + volumes_dict[vol] = {} + volumes = volumes_dict + + if healthcheck and isinstance(healthcheck, dict): + healthcheck = Healthcheck(**healthcheck) + + attach_stdin = False + attach_stdout = False + attach_stderr = False + stdin_once = False + + if not detach: + attach_stdout = True + attach_stderr = True + + if stdin_open: + attach_stdin = True + stdin_once = True + + self.update({ + 'Hostname': hostname, + 'Domainname': domainname, + 'ExposedPorts': ports, + 'User': str(user) if user else None, + 'Tty': tty, + 'OpenStdin': stdin_open, + 'StdinOnce': stdin_once, + 'AttachStdin': attach_stdin, + 'AttachStdout': attach_stdout, + 'AttachStderr': attach_stderr, + 'Env': environment, + 'Cmd': command, + 'Image': image, + 'Volumes': volumes, + 'NetworkDisabled': network_disabled, + 'Entrypoint': entrypoint, + 'WorkingDir': working_dir, + 'HostConfig': host_config, + 'NetworkingConfig': networking_config, + 'MacAddress': mac_address, + 'Labels': labels, + 'StopSignal': stop_signal, + 'Healthcheck': healthcheck, + 'StopTimeout': stop_timeout, + 'Runtime': runtime + }) diff --git a/aiodocker/types/healthcheck.py b/aiodocker/types/healthcheck.py new file mode 100644 index 00000000..b77185f8 --- /dev/null +++ b/aiodocker/types/healthcheck.py @@ -0,0 +1,86 @@ +from .base import DictType + + +class Healthcheck(DictType): + """ + Defines a healthcheck configuration for a container or service. + + Args: + test (:py:class:`list` or str): Test to perform to determine + container health. Possible values: + + - Empty list: Inherit healthcheck from parent image + - ``["NONE"]``: Disable healthcheck + - ``["CMD", args...]``: exec arguments directly. + - ``["CMD-SHELL", command]``: RUn command in the system's + default shell. + + If a string is provided, it will be used as a ``CMD-SHELL`` + command. + interval (int): The time to wait between checks in nanoseconds. It + should be 0 or at least 1000000 (1 ms). 
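ContainerConfig and HostConfig above are plain dict subclasses that translate pythonic keyword arguments into the JSON payload the Engine expects, leaning on the conversion helpers from aiodocker.utils. A small sketch of the payloads they produce, assuming this branch of aiodocker is importable; the inline comments show what the converters yield for these particular inputs.

from aiodocker.types import ContainerConfig, HostConfig   # assumes this branch is installed

host_config = HostConfig(
    version='v1.30',
    mem_limit='512m',                    # parse_bytes() -> 536870912
    port_bindings={'8080/tcp': 8080},    # -> {'8080/tcp': [{'HostIp': '', 'HostPort': '8080'}]}
    restart_policy={'Name': 'on-failure', 'MaximumRetryCount': 3},
)

config = ContainerConfig(
    'v1.30', 'busybox:latest', 'sleep 300',   # command is shlex-split to ['sleep', '300']
    environment={'APP_ENV': 'dev'},           # -> ['APP_ENV=dev']
    ports=[(8080, 'tcp')],                    # -> ExposedPorts {'8080/tcp': {}}
    host_config=host_config,
)

print(config['Cmd'], config['Env'], sorted(config['ExposedPorts']))
print(host_config['Memory'], host_config['PortBindings'])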
+ timeout (int): The time to wait before considering the check to + have hung. It should be 0 or at least 1000000 (1 ms). + retries (integer): The number of consecutive failures needed to + consider a container as unhealthy. + start_period (integer): Start period for the container to + initialize before starting health-retries countdown in + nanoseconds. It should be 0 or at least 1000000 (1 ms). + """ + def __init__(self, **kwargs): + test = kwargs.get('test', kwargs.get('Test')) + if isinstance(test, str): + test = ["CMD-SHELL", test] + + interval = kwargs.get('interval', kwargs.get('Interval')) + timeout = kwargs.get('timeout', kwargs.get('Timeout')) + retries = kwargs.get('retries', kwargs.get('Retries')) + start_period = kwargs.get('start_period', kwargs.get('StartPeriod')) + + super(Healthcheck, self).__init__({ + 'Test': test, + 'Interval': interval, + 'Timeout': timeout, + 'Retries': retries, + 'StartPeriod': start_period + }) + + @property + def test(self): + return self['Test'] + + @test.setter + def test(self, value): + self['Test'] = value + + @property + def interval(self): + return self['Interval'] + + @interval.setter + def interval(self, value): + self['Interval'] = value + + @property + def timeout(self): + return self['Timeout'] + + @timeout.setter + def timeout(self, value): + self['Timeout'] = value + + @property + def retries(self): + return self['Retries'] + + @retries.setter + def retries(self, value): + self['Retries'] = value + + @property + def start_period(self): + return self['StartPeriod'] + + @start_period.setter + def start_period(self, value): + self['StartPeriod'] = value diff --git a/aiodocker/types/networks.py b/aiodocker/types/networks.py new file mode 100644 index 00000000..70279bb4 --- /dev/null +++ b/aiodocker/types/networks.py @@ -0,0 +1,33 @@ +from ..utils.utils import normalize_links + + +class EndpointConfig(dict): + def __init__(self, version, aliases=None, links=None, ipv4_address=None, + ipv6_address=None, link_local_ips=None): + + if aliases: + self["Aliases"] = aliases + + if links: + self["Links"] = normalize_links(links) + + ipam_config = {} + if ipv4_address: + ipam_config['IPv4Address'] = ipv4_address + + if ipv6_address: + ipam_config['IPv6Address'] = ipv6_address + + if link_local_ips is not None: + ipam_config['LinkLocalIPs'] = link_local_ips + + if ipam_config: + self['IPAMConfig'] = ipam_config + + +class NetworkingConfig(dict): + def __init__(self, endpoints_config=None): + if endpoints_config: + self["EndpointsConfig"] = endpoints_config + + diff --git a/aiodocker/utils/__init__.py b/aiodocker/utils/__init__.py new file mode 100644 index 00000000..e7ec2fa4 --- /dev/null +++ b/aiodocker/utils/__init__.py @@ -0,0 +1,4 @@ +from .utils import (httpize, parse_result, _DecodeHelper, identical, human_bool, clean_map, + compose_auth_header, clean_networks, clean_filters, parse_content_type, + format_env, mktar_from_dockerfile) + diff --git a/aiodocker/utils/utils.py b/aiodocker/utils/utils.py new file mode 100644 index 00000000..a62398e5 --- /dev/null +++ b/aiodocker/utils/utils.py @@ -0,0 +1,554 @@ +import asyncio +import base64 +import codecs +from io import BytesIO +import sys +import shlex +from typing import ( + Any, Iterable, Optional, Union, + MutableMapping, Mapping, Tuple, + BinaryIO, IO, +) +import tempfile +import tarfile +import json + +from distutils.version import StrictVersion + +from .. 
import errors + + +BYTE_UNITS = { + 'b': 1, + 'k': 1024, + 'm': 1024 * 1024, + 'g': 1024 * 1024 * 1024 +} + + +def compare_version(v1, v2): + """Compare docker versions + + >>> v1 = '1.9' + >>> v2 = '1.10' + >>> compare_version(v1, v2) + 1 + >>> compare_version(v2, v1) + -1 + >>> compare_version(v2, v2) + 0 + """ + s1 = StrictVersion(v1) + s2 = StrictVersion(v2) + if s1 == s2: + return 0 + elif s1 > s2: + return -1 + else: + return 1 + + +def version_lt(v1, v2): + return compare_version(v1, v2) > 0 + + +def version_gte(v1, v2): + return not version_lt(v1, v2) + + +async def parse_result(response, response_type=None, *, + encoding='utf-8'): + ''' + Convert the response to native objects by the given response type + or the auto-detected HTTP content-type. + It also ensures release of the response object. + ''' + if response_type is None: + ct = response.headers.get('content-type') + if ct is None: + raise TypeError('Cannot auto-detect respone type ' + 'due to missing Content-Type header.') + main_type, sub_type, extras = parse_content_type(ct) + if sub_type == 'json': + response_type = 'json' + elif sub_type == 'x-tar': + response_type = 'tar' + elif (main_type, sub_type) == ('text', 'plain'): + response_type = 'text' + encoding = extras.get('charset', encoding) + else: + raise TypeError("Unrecognized response type: {ct}" + .format(ct=ct)) + if 'tar' == response_type: + what = await response.read() + return tarfile.open(mode='r', fileobj=BytesIO(what)) + if 'json' == response_type: + data = await response.json(encoding=encoding) + elif 'text' == response_type: + data = await response.text(encoding=encoding) + else: + data = await response.read() + return data + + +def parse_content_type(ct: str) -> Tuple[str, str, Mapping[str, str]]: + ''' + Decompose the value of HTTP "Content-Type" header into + the main/sub MIME types and other extra options as a dictionary. + All parsed values are lower-cased automatically. 
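parse_result() above decides how to decode a response from the Content-Type header rather than trusting the caller; only JSON, tar and plain-text payloads are recognized. A standalone sketch of that dispatch (the function name is illustrative):

def pick_response_type(content_type):
    # Mirror the mapping used by parse_result(): json, x-tar or text/plain.
    main_type, _, rest = content_type.partition('/')
    sub_type = rest.split(';', 1)[0].strip()
    if sub_type == 'json':
        return 'json'
    if sub_type == 'x-tar':
        return 'tar'
    if (main_type, sub_type) == ('text', 'plain'):
        return 'text'
    raise TypeError('Unrecognized response type: {}'.format(content_type))

print(pick_response_type('application/json; charset=utf-8'))   # json
print(pick_response_type('application/x-tar'))                 # tar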
+ ''' + pieces = ct.split(';') + try: + main_type, sub_type = pieces[0].split('/') + except ValueError: + msg = 'Invalid mime-type component: "{0}"'.format(pieces[0]) + raise ValueError(msg) + if len(pieces) > 1: + options = {} + for opt in pieces[1:]: + opt = opt.strip() + if not opt: + continue + try: + k, v = opt.split('=', 1) + except ValueError: + msg = 'Invalid option component: "{0}"'.format(opt) + raise ValueError(msg) + else: + options[k.lower()] = v.lower() + else: + options = {} + return main_type.lower(), sub_type.lower(), options + + +def identical(d1, d2): + if type(d1) != type(d2): + return False + + if isinstance(d1, dict): + keys = set(d1.keys()) | set(d2.keys()) + for key in keys: + if not identical(d1.get(key, {}), d2.get(key, {})): + return False + return True + + if isinstance(d1, list): + if len(d1) != len(d2): + return False + + pairs = zip(d1, d2) + return all((identical(x, y) for (x, y) in pairs)) + + return d1 == d2 + + +_true_strs = frozenset(['true', 'yes', 'y', '1']) +_false_strs = frozenset(['false', 'no', 'n', '0']) + + +def human_bool(s) -> bool: + if isinstance(s, str): + if s.lower() in _true_strs: + return True + if s.lower() in _false_strs: + return False + raise ValueError('Cannot interpret {s!r} as boolean.'.format(s=s)) + else: + return bool(s) + + +def httpize(d: Optional[Mapping]) -> Mapping[str, Any]: + if d is None: + return None + converted = {} + for k, v in d.items(): + if isinstance(v, bool): + v = '1' if v else '0' + if not isinstance(v, str): + v = str(v) + converted[k] = v + return converted + + +class _DecodeHelper: + """ + Decode logs from the Docker Engine + """ + + def __init__(self, generator, encoding): + self._gen = generator.__aiter__() + self._decoder = codecs.getincrementaldecoder(encoding)(errors='ignore') + self._flag = False + + def __aiter__(self): + return self + + # to make it compatible with Python 3.5.0 and 3.5.2 + # https://www.python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions + if sys.version_info <= (3, 5, 2): + __aiter__ = asyncio.coroutine(__aiter__) + + async def __anext__(self): + if self._flag: + raise StopAsyncIteration + + # we catch StopAsyncIteration from self._gen + # because we need to close the decoder + # then we raise StopAsyncIteration checking self._flag + try: + stream = await self._gen.__anext__() + except StopAsyncIteration: + self._flag = True + stream_decoded = self._decoder.decode(b'', final=True) + if stream_decoded: + return stream_decoded + raise StopAsyncIteration + else: + return self._decoder.decode(stream) + + +def clean_map(obj: Mapping[Any, Any]) -> Mapping[Any, Any]: + """ + Return a new copied dictionary without the keys with ``None`` values from + the given Mapping object. 
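Two small helpers here shape every request: clean_map() drops None-valued keys, and httpize() renders the survivors as the strings the Engine's query API expects (booleans become '1'/'0'). A standalone sketch of that pipeline, re-implemented inline so it runs without the package:

def clean_map(obj):
    return {k: v for k, v in obj.items() if v is not None}

def httpize(params):
    return {k: ('1' if v else '0') if isinstance(v, bool) else str(v)
            for k, v in params.items()}

params = clean_map({'all': True, 'size': None, 'limit': 5})
print(httpize(params))   # {'all': '1', 'limit': '5'}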
+ """ + return {k: v for k, v in obj.items() if v is not None} + + +def format_env(key, value: Union[None, bytes, str]) -> str: + """ + Formats envs from {key:value} to ['key=value'] + """ + if value is None: + return key + if isinstance(value, bytes): + value = value.decode('utf-8') + + return "{key}={value}".format(key=key, value=value) + + +def clean_networks(networks: Iterable[str]=None) -> Iterable[str]: + """ + Cleans the values inside `networks` + Returns a new list + """ + if not networks: + return networks + if not isinstance(networks, list): + raise TypeError('networks parameter must be a list.') + + result = [] + for n in networks: + if isinstance(n, str): + n = {'Target': n} + result.append(n) + return result + + +def clean_filters(filters: Mapping=None) -> str: + """ + Checks the values inside `filters` + https://docs.docker.com/engine/api/v1.29/#operation/ServiceList + Returns a new dictionary in the format `map[string][]string` jsonized + """ + + if filters and isinstance(filters, dict): + for k, v in filters.items(): + if isinstance(v, bool): + v = 'true' if v else 'false' + if not isinstance(v, list): + v = [v, ] + filters[k] = v + + return json.dumps(filters) + + +def mktar_from_dockerfile(fileobject: BinaryIO) -> IO: + """ + Create a zipped tar archive from a Dockerfile + **Remember to close the file object** + Args: + fileobj: a Dockerfile + Returns: + a NamedTemporaryFile() object + """ + + f = tempfile.NamedTemporaryFile() + t = tarfile.open(mode='w:gz', fileobj=f) + + if isinstance(fileobject, BytesIO): + dfinfo = tarfile.TarInfo('Dockerfile') + dfinfo.size = len(fileobject.getvalue()) + fileobject.seek(0) + else: + dfinfo = t.gettarinfo(fileobj=fileobject, arcname='Dockerfile') + + t.addfile(dfinfo, fileobject) + t.close() + f.seek(0) + return f + + +def compose_auth_header(auth: Union[MutableMapping, str, bytes], + registry_addr: str=None) -> str: + """ + Validate and compose base64-encoded authentication header + with an optional support for parsing legacy-style "user:password" + strings. + + Args: + auth: Authentication information + registry_addr: An address of the registry server + + Returns: + A base64-encoded X-Registry-Auth header value + """ + if isinstance(auth, Mapping): + # Validate the JSON format only. + if 'identitytoken' in auth: + pass + elif 'auth' in auth: + return compose_auth_header(auth['auth'], registry_addr) + else: + if registry_addr: + auth['serveraddress'] = registry_addr + auth_json = json.dumps(auth).encode('utf-8') + auth = base64.b64encode(auth_json).decode('ascii') + elif isinstance(auth, (str, bytes)): + # Parse simple "username:password"-formatted strings + # and attach the server address specified. 
+ if isinstance(auth, bytes): + auth = auth.decode('utf-8') + s = base64.b64decode(auth) + username, passwd = s.split(b':', 1) + config = { + "username": username.decode('utf-8'), + "password": passwd.decode('utf-8'), + "email": None, + "serveraddress": registry_addr, + } + auth_json = json.dumps(config).encode('utf-8') + auth = base64.b64encode(auth_json).decode('ascii') + else: + raise TypeError( + "auth must be base64 encoded string/bytes or a dictionary") + return auth + + +def parse_repository_tag(repo_name): + parts = repo_name.rsplit('@', 1) + if len(parts) == 2: + return tuple(parts) + parts = repo_name.rsplit(':', 1) + if len(parts) == 2 and '/' not in parts[1]: + return tuple(parts) + return repo_name, None + + +def parse_bytes(s): + if isinstance(s, (int, float)): + return s + if len(s) == 0: + return 0 + + if s[-2:-1].isalpha() and s[-1].isalpha(): + if s[-1] == "b" or s[-1] == "B": + s = s[:-1] + units = BYTE_UNITS + suffix = s[-1].lower() + + # Check if the variable is a string representation of an int + # without a units part. Assuming that the units are bytes. + if suffix.isdigit(): + digits_part = s + suffix = 'b' + else: + digits_part = s[:-1] + + if suffix in units.keys() or suffix.isdigit(): + try: + digits = int(digits_part) + except ValueError: + raise errors.DockerException( + 'Failed converting the string value for memory ({0}) to' + ' an integer.'.format(digits_part) + ) + + # Reconvert to long for the final result + s = int(digits * units[suffix]) + else: + raise errors.DockerException( + 'The specified value for memory ({0}) should specify the' + ' units. The postfix should be one of the `b` `k` `m` `g`' + ' characters'.format(s) + ) + + return s + + +def parse_devices(devices): + device_list = [] + for device in devices: + if isinstance(device, dict): + device_list.append(device) + continue + if not isinstance(device, str): + raise errors.DockerException( + 'Invalid device type {0}'.format(type(device)) + ) + device_mapping = device.split(':') + if device_mapping: + path_on_host = device_mapping[0] + if len(device_mapping) > 1: + path_in_container = device_mapping[1] + else: + path_in_container = path_on_host + if len(device_mapping) > 2: + permissions = device_mapping[2] + else: + permissions = 'rwm' + device_list.append({ + 'PathOnHost': path_on_host, + 'PathInContainer': path_in_container, + 'CgroupPermissions': permissions + }) + return device_list + + +def _convert_port_binding(binding): + result = {'HostIp': '', 'HostPort': ''} + if isinstance(binding, tuple): + if len(binding) == 2: + result['HostPort'] = binding[1] + result['HostIp'] = binding[0] + elif isinstance(binding[0], str): + result['HostIp'] = binding[0] + else: + result['HostPort'] = binding[0] + elif isinstance(binding, dict): + if 'HostPort' in binding: + result['HostPort'] = binding['HostPort'] + if 'HostIp' in binding: + result['HostIp'] = binding['HostIp'] + else: + raise ValueError(binding) + else: + result['HostPort'] = binding + + if result['HostPort'] is None: + result['HostPort'] = '' + else: + result['HostPort'] = str(result['HostPort']) + + return result + + +def convert_port_bindings(port_bindings): + result = {} + for k, v in port_bindings.items(): + key = str(k) + if '/' not in key: + key += '/tcp' + if isinstance(v, list): + result[key] = [_convert_port_binding(binding) for binding in v] + else: + result[key] = [_convert_port_binding(v)] + return result + + +def convert_volume_binds(binds): + if isinstance(binds, list): + return binds + + result = [] + for k, v in binds.items(): 
+ if isinstance(k, bytes): + k = k.decode('utf-8') + + if isinstance(v, dict): + if 'ro' in v and 'mode' in v: + raise ValueError( + 'Binding cannot contain both "ro" and "mode": {}' + .format(repr(v)) + ) + + bind = v['bind'] + if isinstance(bind, bytes): + bind = bind.decode('utf-8') + + if 'ro' in v: + mode = 'ro' if v['ro'] else 'rw' + elif 'mode' in v: + mode = v['mode'] + else: + mode = 'rw' + + result.append('{0}:{1}:{2}').format(k, bind, mode) + else: + if isinstance(v, bytes): + v = v.decode('utf-8') + result.append('{0}:{1}:rw').format(k, v) + return result + + +def convert_tmpfs_mounts(tmpfs): + if isinstance(tmpfs, dict): + return tmpfs + + if not isinstance(tmpfs, list): + raise ValueError( + 'Expected tmpfs value to be either a list or a dict, found: {}' + .format(type(tmpfs).__name__) + ) + + result = {} + for mount in tmpfs: + if isinstance(mount, str): + if ":" in mount: + name, options = mount.split(":", 1) + else: + name = mount + options = "" + + else: + raise ValueError( + "Expected item in tmpfs list to be a string, found: {}" + .format(type(mount).__name__) + ) + + result[name] = options + return result + + +def normalize_links(links): + if isinstance(links, dict): + links = links.items() + + return ['{0}:{1}'.format(k, v) for k, v in sorted(links)] + + +def split_command(command): + return shlex.split(command) + + +def format_environment(environment): + def format_env(key, value): + if value is None: + return key + if isinstance(value, bytes): + value = value.decode('utf-8') + + return u'{key}={value}'.format(key=key, value=value) + return [format_env(*var) for var in environment.items()] + + +def format_extra_hosts(extra_hosts, task=False): + # Use format dictated by Swarm API if container is part of a task + if task: + return [ + '{} {}'.format(v, k) for k, v in sorted(extra_hosts.items()) + ] + + return [ + '{}:{}'.format(k, v) for k, v in sorted(extra_hosts.items) + ] diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle deleted file mode 100644 index 024c29d8..00000000 Binary files a/docs/_build/doctrees/environment.pickle and /dev/null differ diff --git a/docs/_build/doctrees/index.doctree b/docs/_build/doctrees/index.doctree index 11c9e3c3..09c8b901 100644 Binary files a/docs/_build/doctrees/index.doctree and b/docs/_build/doctrees/index.doctree differ diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt index 2a96baac..c3ed06d7 100644 --- a/docs/_build/html/_sources/index.rst.txt +++ b/docs/_build/html/_sources/index.rst.txt @@ -103,6 +103,12 @@ We support `Stack Overflow `_. Please add *python-asyncio* tag to your question there. +Contribution +------------ + +Please follow the `Contribution Guide `_. + + Author and License ------------------- @@ -111,13 +117,21 @@ The ``aiodocker`` package is written by Andrew Svetlov. It's *Apache 2* licensed and freely available. - - .. toctree:: + :hidden: :maxdepth: 2 - :caption: Contents: - + client + containers + images + services + swarm + volumes + tasks + log + events + exceptions + api Indices and tables ================== @@ -125,3 +139,4 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` + diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css index 6df76b0a..19ced105 100644 --- a/docs/_build/html/_static/basic.css +++ b/docs/_build/html/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. 
+ * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @@ -82,9 +82,21 @@ div.sphinxsidebar input { } div.sphinxsidebar #searchbox input[type="text"] { - width: 170px; + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; } + img { border: 0; max-width: 100%; @@ -199,6 +211,11 @@ table.modindextable td { /* -- general body styles --------------------------------------------------- */ +div.body { + min-width: 450px; + max-width: 800px; +} + div.body p, div.body dd, div.body li, div.body blockquote { -moz-hyphens: auto; -ms-hyphens: auto; @@ -332,6 +349,11 @@ table.docutils { border-collapse: collapse; } +table.align-center { + margin-left: auto; + margin-right: auto; +} + table caption span.caption-number { font-style: italic; } @@ -445,10 +467,14 @@ dd { margin-left: 30px; } -dt:target, .highlighted { +dt:target, span.highlighted { background-color: #fbe54e; } +rect.highlighted { + fill: #fbe54e; +} + dl.glossary dt { font-weight: bold; font-size: 1.1em; diff --git a/docs/_build/html/_static/doctools.js b/docs/_build/html/_static/doctools.js index 56549772..0c15c009 100644 --- a/docs/_build/html/_static/doctools.js +++ b/docs/_build/html/_static/doctools.js @@ -4,7 +4,7 @@ * * Sphinx JavaScript utilities for all documentation. * - * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @@ -45,7 +45,7 @@ jQuery.urlencode = encodeURIComponent; * it will always return arrays of strings for the value parts. */ jQuery.getQueryParameters = function(s) { - if (typeof s == 'undefined') + if (typeof s === 'undefined') s = document.location.search; var parts = s.substr(s.indexOf('?') + 1).split('&'); var result = {}; @@ -66,29 +66,53 @@ jQuery.getQueryParameters = function(s) { * span elements with the given class name. 
*/ jQuery.fn.highlightText = function(text, className) { - function highlight(node) { - if (node.nodeType == 3) { + function highlight(node, addItems) { + if (node.nodeType === 3) { var val = node.nodeValue; var pos = val.toLowerCase().indexOf(text); if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { - var span = document.createElement("span"); - span.className = className; + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } span.appendChild(document.createTextNode(val.substr(pos, text.length))); node.parentNode.insertBefore(span, node.parentNode.insertBefore( document.createTextNode(val.substr(pos + text.length)), node.nextSibling)); node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var bbox = span.getBBox(); + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + var parentOfText = node.parentNode.parentNode; + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } } } else if (!jQuery(node).is("button, select, textarea")) { jQuery.each(node.childNodes, function() { - highlight(this); + highlight(this, addItems); }); } } - return this.each(function() { - highlight(this); + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; }; /* @@ -131,21 +155,21 @@ var Documentation = { * i18n support */ TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, + PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, LOCALE : 'unknown', // gettext and ngettext don't access this so that the functions // can safely bound to a different name (_ = Documentation.gettext) gettext : function(string) { var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated == 'undefined') + if (typeof translated === 'undefined') return string; - return (typeof translated == 'string') ? translated : translated[0]; + return (typeof translated === 'string') ? translated : translated[0]; }, ngettext : function(singular, plural, n) { var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated == 'undefined') + if (typeof translated === 'undefined') return (n == 1) ? 
singular : plural; return translated[Documentation.PLURALEXPR(n)]; }, @@ -180,7 +204,7 @@ var Documentation = { * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 */ fixFirefoxAnchorBug : function() { - if (document.location.hash) + if (document.location.hash && $.browser.mozilla) window.setTimeout(function() { document.location.href += ''; }, 10); @@ -216,7 +240,7 @@ var Documentation = { var src = $(this).attr('src'); var idnum = $(this).attr('id').substr(7); $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) == 'minus.png') + if (src.substr(-9) === 'minus.png') $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); else $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); @@ -248,7 +272,7 @@ var Documentation = { var path = document.location.pathname; var parts = path.split(/\//); $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this == '..') + if (this === '..') parts.pop(); }); var url = parts.join('/'); diff --git a/docs/_build/html/_static/jquery.js b/docs/_build/html/_static/jquery.js index f6a6a99e..644d35e2 100644 --- a/docs/_build/html/_static/jquery.js +++ b/docs/_build/html/_static/jquery.js @@ -1,4 +1,4 @@ -/*! jQuery v3.1.0 | (c) jQuery Foundation | jquery.org/license */ -!function(a,b){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){"use strict";var c=[],d=a.document,e=Object.getPrototypeOf,f=c.slice,g=c.concat,h=c.push,i=c.indexOf,j={},k=j.toString,l=j.hasOwnProperty,m=l.toString,n=m.call(Object),o={};function p(a,b){b=b||d;var c=b.createElement("script");c.text=a,b.head.appendChild(c).parentNode.removeChild(c)}var q="3.1.0",r=function(a,b){return new r.fn.init(a,b)},s=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,t=/^-ms-/,u=/-([a-z])/g,v=function(a,b){return b.toUpperCase()};r.fn=r.prototype={jquery:q,constructor:r,length:0,toArray:function(){return f.call(this)},get:function(a){return null!=a?a<0?this[a+this.length]:this[a]:f.call(this)},pushStack:function(a){var b=r.merge(this.constructor(),a);return b.prevObject=this,b},each:function(a){return r.each(this,a)},map:function(a){return this.pushStack(r.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(f.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(a<0?b:0);return this.pushStack(c>=0&&c0&&b-1 in a)}var x=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ha(),z=ha(),A=ha(),B=function(a,b){return a===b&&(l=!0),0},C={}.hasOwnProperty,D=[],E=D.pop,F=D.push,G=D.push,H=D.slice,I=function(a,b){for(var c=0,d=a.length;c+~]|"+K+")"+K+"*"),S=new RegExp("="+K+"*([^\\]'\"]*?)"+K+"*\\]","g"),T=new RegExp(N),U=new RegExp("^"+L+"$"),V={ID:new RegExp("^#("+L+")"),CLASS:new RegExp("^\\.("+L+")"),TAG:new RegExp("^("+L+"|[*])"),ATTR:new RegExp("^"+M),PSEUDO:new RegExp("^"+N),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+K+"*(even|odd|(([+-]|)(\\d*)n|)"+K+"*(?:([+-]|)"+K+"*(\\d+)|))"+K+"*\\)|)","i"),bool:new RegExp("^(?:"+J+")$","i"),needsContext:new RegExp("^"+K+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+K+"*((?:-\\d)?\\d*)"+K+"*\\)|)(?=[^-]|$)","i")},W=/^(?:input|select|textarea|button)$/i,X=/^h\d$/i,Y=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,$=/[+~]/,_=new 
RegExp("\\\\([\\da-f]{1,6}"+K+"?|("+K+")|.)","ig"),aa=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:d<0?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},ba=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\x80-\uFFFF\w-]/g,ca=function(a,b){return b?"\0"===a?"\ufffd":a.slice(0,-1)+"\\"+a.charCodeAt(a.length-1).toString(16)+" ":"\\"+a},da=function(){m()},ea=ta(function(a){return a.disabled===!0},{dir:"parentNode",next:"legend"});try{G.apply(D=H.call(v.childNodes),v.childNodes),D[v.childNodes.length].nodeType}catch(fa){G={apply:D.length?function(a,b){F.apply(a,H.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function ga(a,b,d,e){var f,h,j,k,l,o,r,s=b&&b.ownerDocument,w=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==w&&9!==w&&11!==w)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==w&&(l=Z.exec(a)))if(f=l[1]){if(9===w){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(s&&(j=s.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(l[2])return G.apply(d,b.getElementsByTagName(a)),d;if((f=l[3])&&c.getElementsByClassName&&b.getElementsByClassName)return G.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==w)s=b,r=a;else if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(ba,ca):b.setAttribute("id",k=u),o=g(a),h=o.length;while(h--)o[h]="#"+k+" "+sa(o[h]);r=o.join(","),s=$.test(a)&&qa(b.parentNode)||b}if(r)try{return G.apply(d,s.querySelectorAll(r)),d}catch(x){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(P,"$1"),b,d,e)}function ha(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ia(a){return a[u]=!0,a}function ja(a){var b=n.createElement("fieldset");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ka(a,b){var c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function la(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&a.sourceIndex-b.sourceIndex;if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function na(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function oa(a){return function(b){return"label"in b&&b.disabled===a||"form"in b&&b.disabled===a||"form"in b&&b.disabled===!1&&(b.isDisabled===a||b.isDisabled!==!a&&("label"in b||!ea(b))!==a)}}function pa(a){return ia(function(b){return b=+b,ia(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function qa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=ga.support={},f=ga.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return!!b&&"HTML"!==b.nodeName},m=ga.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),v!==n&&(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ja(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ja(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Y.test(n.getElementsByClassName),c.getById=ja(function(a){return 
[... remainder of the minified jQuery bundle shipped with the regenerated Sphinx HTML output omitted; it is vendored, machine-generated code with no review-relevant changes and is not reconstructable from this capture ...]

diff --git a/docs/_build/html/genindex.html b/docs/_build/html/genindex.html
index 12e902a7..895cd735 100644
[Generated Sphinx general index. The rebuilt page only reflects the documentation version bump from 0.8.0-a0 to 0.11.0-a0, adds the A–W index letter sections for the newly documented classes (the main hunk grows from 8 to 378 lines), and drops the Travis CI and codecov badge markup from the sidebar.]

diff --git a/docs/_build/html/index.html b/docs/_build/html/index.html
index d8298728..48513651 100644
[Generated Sphinx front page. Same version bump and sidebar badge removal; the page body is the rendered copy of docs/index.rst, whose source change appears below.]
diff --git a/docs/api.rst b/docs/api.rst
new file mode 100644
index 00000000..c504c9e7
--- /dev/null
+++ b/docs/api.rst
@@ -0,0 +1,38 @@
+=============
+Low-level API
+=============
+
+The main object-oriented API is built on top of :py:class:`APIClient`. Each method on :py:class:`APIClient` maps one-to-one to a REST API endpoint and returns the raw response from the Docker daemon.
+
+It is possible to use :py:class:`APIClient` directly. Some basic operations (e.g. running a container) consist of several API calls and are complex to do with the low-level API, but it is useful if you need extra flexibility and power.
+
+
+.. py:module:: aiodocker.api
+
+.. autoclass:: aiodocker.api.APIClient
+   :members:
+   :undoc-members:
+
+.. _low-level-containers:
+
+Containers
+----------
+
+.. py:module:: aiodocker.api.container
+
+.. rst-class:: hide-signature
+.. autoclass:: DockerContainerAPI
+   :members:
+   :undoc-members:
+
+.. _low-level-images:
+
+Images
+------
+
+.. py:module:: aiodocker.api.image
+
+.. rst-class:: hide-signature
+.. autoclass:: DockerImageAPI
+   :members:
+   :undoc-members:
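Since the page above only lists the classes, a minimal usage sketch of the low-level client may help. APIClient, version() and close() are the names introduced elsewhere in this change; the script structure and the printed keys are illustrative only:

import asyncio

from aiodocker.api import APIClient


async def show_daemon_version():
    client = APIClient()                  # picks up DOCKER_HOST or the local unix socket
    try:
        data = await client.version()     # GET /<api_version>/version
        print(data['Version'], data['ApiVersion'])
    finally:
        await client.close()


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(show_daemon_version())
    loop.close()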
diff --git a/docs/exceptions.rst b/docs/exceptions.rst
index f588cf9e..e45fa96a 100644
--- a/docs/exceptions.rst
+++ b/docs/exceptions.rst
@@ -12,3 +12,19 @@ DockerError
 .. autoclass:: aiodocker.exceptions.DockerError
    :members:
    :undoc-members:
+
+.. autoclass:: aiodocker.errors.DockerException
+   :members:
+   :undoc-members:
+
+.. autoclass:: aiodocker.errors.APIError
+   :members:
+   :undoc-members:
+
+.. autoclass:: aiodocker.errors.NotFound
+   :members:
+   :undoc-members:
+
+.. autoclass:: aiodocker.errors.ImageNotFound
+   :members:
+   :undoc-members:
diff --git a/docs/index.rst b/docs/index.rst
index 2fb1e64b..c3ed06d7 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -131,6 +131,7 @@ It's *Apache 2* licensed and freely available.
    log
    events
    exceptions
+   api
 
 Indices and tables
 ==================
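The example and tests added below catch these classes individually; judging by the except ordering there (ImageNotFound before APIError), ImageNotFound is presumably the most specific subclass. A hedged helper in that style — only the class names, DockerClient and images.remove() come from this diff, the rest is illustrative:

from aiodocker.client import DockerClient
from aiodocker.errors import APIError, ImageNotFound


async def remove_image_if_present(client: DockerClient, name: str) -> None:
    # Assumes ImageNotFound is a subclass of APIError, as the except
    # ordering in the new tests suggests; adjust if the hierarchy differs.
    try:
        await client.images.remove(name, force=True)
    except ImageNotFound:
        pass       # nothing to remove
    except APIError:
        raise      # any other daemon error is a real failure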
diff --git a/examples/info_new_api.py b/examples/info_new_api.py
new file mode 100755
index 00000000..88acaa3b
--- /dev/null
+++ b/examples/info_new_api.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+
+import asyncio
+from aiodocker.client import DockerClient
+from aiodocker.errors import ImageNotFound
+
+
+async def demo(client):
+    print('--------------------------------')
+    print('- Check Docker Version Information')
+    data_version = await client.version()
+    for key, value in data_version.items():
+        print(key, ':', value)
+
+    print('--------------------------------')
+    print('- Check Docker Image List')
+    images = await client.images.list()
+    for image in images:
+        print('Id: {} RepoTags: {}'.format(image.short_id, image.tags))
+
+    print('--------------------------------')
+    print('- Check Docker Container List')
+    containers = await client.containers.list()
+    for container in containers:
+        print('Id: {} Name: {}'.format(container.id, container.name))
+    print('--------------------------------')
+    print('- Check for non-existing Image')
+    try:
+        await client.images.get('non-existing-image')
+    except ImageNotFound as e:
+        print(e)
+
+
+if __name__ == '__main__':
+    loop = asyncio.get_event_loop()
+    docker_client = DockerClient()
+    try:
+        loop.run_until_complete(demo(docker_client))
+    finally:
+        loop.run_until_complete(docker_client.close())
+        loop.close()
diff --git a/tests/new-api/conftest.py b/tests/new-api/conftest.py
new file mode 100644
index 00000000..0e4f3d7a
--- /dev/null
+++ b/tests/new-api/conftest.py
@@ -0,0 +1,161 @@
+import asyncio
+import uuid
+from distutils.version import StrictVersion
+from os import environ as ENV
+import traceback
+
+import pytest
+
+from aiodocker.client import DockerClient
+from aiodocker.errors import NotFound, ImageNotFound, APIError
+
+
+_api_versions = {
+    "17.06": "v1.30",
+}
+
+
+def _random_name():
+    return "aiodocker-" + uuid.uuid4().hex[:7]
+
+
+@pytest.fixture(scope='session')
+def random_name():
+    yield _random_name
+
+    # If some test cases have used randomly-named temporary images,
+    # we need to clean them up!
+    if ENV.get('CI', '') == 'true':
+        # But inside the CI server, we don't need to clean up!
+        return
+    event_loop = asyncio.get_event_loop()
+
+    async def _clean():
+        docker_client = DockerClient()
+        images = await docker_client.images.list()
+        for img in images:
+            if not img.tags:
+                continue
+            try:
+                if img.tags[0].startswith('aiodocker-'):
+                    print('Deleting image id: {0}'.format(img.id))
+                    await docker_client.images.remove(img.id, force=True)
+            except APIError as e:
+                traceback.print_exc()
+        await docker_client.close()
+
+    event_loop.run_until_complete(_clean())
+
+
+@pytest.fixture(scope='session')
+def testing_images():
+    # Prepare the small Linux images shared by most test cases.
+    event_loop = asyncio.get_event_loop()
+
+    async def _pull():
+        docker_client = DockerClient()
+        required_images = [
+            'alpine:latest',
+            'redis:latest',
+            'redis:3.0.2',
+            'redis:4.0',
+            'python:3.6.1-alpine',
+        ]
+        for img in required_images:
+            try:
+                await docker_client.images.get(img)
+            except ImageNotFound:
+                print('Pulling "{img}" for the testing session...'.format(img=img))
+                await docker_client.images.pull(img)
+        await docker_client.close()
+
+    event_loop.run_until_complete(_pull())
+
+
+@pytest.fixture
+def docker(event_loop, testing_images):
+    kwargs = {}
+    if "DOCKER_VERSION" in ENV:
+        kwargs["api_version"] = _api_versions[ENV["DOCKER_VERSION"]]
+    docker = DockerClient(**kwargs)
+    yield docker
+
+    async def _finalize():
+        await docker.close()
+    event_loop.run_until_complete(_finalize())
+
+
+@pytest.fixture
+def requires_api_version(docker):
+    event_loop = asyncio.get_event_loop()
+
+    async def _get_version():
+        return await docker.version()
+
+    _version = event_loop.run_until_complete(_get_version())
+
+    def check(version, reason):
+        if StrictVersion(_version[1:]) < StrictVersion(version[1:]):
+            pytest.skip(reason)
+
+    yield check
+
+
+@pytest.fixture
+def swarm(event_loop, docker):
+    assert event_loop.run_until_complete(docker.swarm.init())
+    yield docker
+    assert event_loop.run_until_complete(docker.swarm.leave(force=True))
+
+
+@pytest.fixture
+def shell_container(event_loop, docker):
+    container = None
+    config = {
+        "Cmd": ["/bin/ash"],
+        "Image": "alpine:latest",
+        "AttachStdin": False,
+        "AttachStdout": False,
+        "AttachStderr": False,
+        "Tty": True,
+        "OpenStdin": True,
+    }
+
+    async def _spawn():
+        nonlocal container
+        container = await docker.containers.create(
+            config=config,
+            name='aiodocker-testing-shell')
+        await container.start()
+    event_loop.run_until_complete(_spawn())
+
+    yield container
+
+    async def _delete():
+        nonlocal container
+        await container.delete(force=True)
+    event_loop.run_until_complete(_delete())
+
+
+@pytest.fixture
+def redis_container(event_loop, docker):
+    container = None
+    config = {
+        "Image": "redis:latest",
+        "PublishAllPorts": True,
+    }
+
+    async def _spawn():
+        nonlocal container
+        container = await docker.containers.create(
+            config=config,
+            name='aiodocker-testing-redis')
+        await container.start()
+    event_loop.run_until_complete(_spawn())
+
+    yield container
+
+    async def _delete():
+        nonlocal container
+        await container.delete(force=True)
+    event_loop.run_until_complete(_delete())
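The shell_container and redis_container fixtures above are not exercised by the test modules below yet; a hypothetical consumer could look roughly like this. The fixture names, docker.containers.get(), .id, .status and .attrs come from this diff, while the inspected Config key is an assumption about the Docker inspect payload:

import pytest


@pytest.mark.asyncio
async def test_shell_container_keeps_stdin_open(docker, shell_container):
    # Re-fetch the model to get fresh attrs, as test_restart() does below.
    container = await docker.containers.get(shell_container.id)
    assert container.status == 'running'
    assert container.attrs['Config']['OpenStdin'] is True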
diff --git a/tests/new-api/test_containers_api.py b/tests/new-api/test_containers_api.py
new file mode 100644
index 00000000..d961d0db
--- /dev/null
+++ b/tests/new-api/test_containers_api.py
@@ -0,0 +1,88 @@
+import asyncio
+import pytest
+
+from aiodocker.errors import ImageNotFound, APIError
+from aiodocker.exceptions import DockerContainerError
+
+
+async def _validate_hello(container):
+    try:
+        await container.start()
+        response = await container.wait()
+        assert response['StatusCode'] == 0
+        logs = await container.logs(stdout=True)
+        assert logs == ['hello']
+
+        with pytest.raises(TypeError):
+            await container.logs()
+    finally:
+        await container.remove(force=True)
+
+
+@pytest.mark.asyncio
+async def test_run_existing_container(docker):
+    name = "alpine:latest"
+    await docker.images.pull(name)
+    container = await docker.containers.run(name, command=['-c', 'echo hello'], entrypoint='sh')
+
+    await _validate_hello(container)
+
+
+@pytest.mark.asyncio
+async def test_run_container_with_missing_image(docker):
+    name = "alpine:latest"
+    try:
+        await asyncio.sleep(1)
+        await docker.images.remove(name, force=True)
+    except ImageNotFound as e:
+        pass  # already missing, pass
+    except APIError:
+        raise
+
+    # should automatically pull the image
+    container = await docker.containers.run(name, command=['-c', 'echo hello'], entrypoint='sh')
+
+    await _validate_hello(container)
+
+
+@pytest.mark.asyncio
+async def test_run_failing_start_container(docker):
+    name = "alpine:latest"
+    try:
+        await docker.images.remove(name, force=True)
+    except ImageNotFound as e:
+        pass  # already missing, pass
+    except APIError:
+        raise
+
+    with pytest.raises(DockerContainerError) as e_info:
+        # we want to raise an error
+        # `executable file not found`
+        await docker.containers.run(name, command=['pyton', 'echo hello'])
+
+    assert e_info.value.container_id
+    # This container is created but not started!
+    # We should delete it afterwards.
+    cid = e_info.value.container_id
+    container = await docker.containers.get(cid)
+    await container.remove()
+
+
+@pytest.mark.asyncio
+async def test_restart(docker):
+    container = await docker.containers.run('gcr.io/google-containers/pause')
+    try:
+        container = await docker.containers.get(container.id)
+        assert container.status == 'running'
+        startTime = container.attrs['State']['StartedAt']
+        await container.restart(timeout=1)
+        await asyncio.sleep(3)
+        container = await docker.containers.get(container.id)
+        assert container.attrs['State']['Running']
+        restartTime = container.attrs['State']['StartedAt']
+
+        assert restartTime > startTime
+
+        await container.stop()
+    finally:
+        await container.remove(force=True)
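test_run_failing_start_container above relies on DockerContainerError carrying the id of the container that was created but never started; the same pattern is worth using in application code. A sketch, assuming only the containers.run(), containers.get(), container_id and remove() behaviour exercised by that test; the helper name and the deliberately bogus command are illustrative:

from aiodocker.exceptions import DockerContainerError


async def run_or_clean_up(client, image, command):
    try:
        return await client.containers.run(image, command=command)
    except DockerContainerError as e:
        # The container exists but failed to start; remove the leftover
        # before propagating the error.
        leftover = await client.containers.get(e.container_id)
        await leftover.remove()
        raise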
diff --git a/tests/new-api/test_images_api.py b/tests/new-api/test_images_api.py
new file mode 100644
index 00000000..084e582b
--- /dev/null
+++ b/tests/new-api/test_images_api.py
@@ -0,0 +1,158 @@
+from io import BytesIO
+
+import pytest
+from aiodocker import utils
+from aiodocker.errors import ImageNotFound
+
+
+@pytest.mark.asyncio
+async def test_build_from_remote_file(docker, random_name):
+    remote = ("https://raw.githubusercontent.com/aio-libs/"
+              "aiodocker/master/tests/docker/Dockerfile")
+
+    tag = "{}:1.0".format(random_name())
+    params = {
+        'tag': tag,
+        'remote': remote
+    }
+    await docker.images.build(**params)
+
+    image = await docker.images.get(tag)
+    assert image
+
+
+@pytest.mark.asyncio
+async def test_build_from_remote_tar(docker, random_name):
+    remote = ("https://github.com/aio-libs/aiodocker/"
+              "raw/master/tests/docker/docker_context.tar")
+
+    tag = "{}:1.0".format(random_name())
+    params = {
+        'tag': tag,
+        'remote': remote
+    }
+    await docker.images.build(**params)
+
+    image = await docker.images.get(tag)
+    assert image
+
+
+@pytest.mark.asyncio
+async def test_history(docker):
+    name = "alpine:latest"
+    image = await docker.images.get(name)
+    history = await image.history()
+    assert history
+
+
+@pytest.mark.asyncio
+async def test_list_images(docker):
+    name = "alpine:latest"
+    images = await docker.images.list(name=name)
+    assert len(images) == 1
+
+
+@pytest.mark.asyncio
+async def test_tag_image(docker, random_name):
+    name = "alpine:latest"
+    image = await docker.images.get(name)
+    repository = random_name()
+    await image.tag(repository=repository, tag="1.0")
+    await image.tag(repository=repository, tag="2.0")
+    image = await docker.images.get(name)
+    assert len([x for x in image.tags if x.startswith(repository)]) == 2
+
+
+@pytest.mark.asyncio
+async def test_push_image(docker):
+    name = "alpine:latest"
+    repository = "localhost:5000/image"
+    image = await docker.images.get(name)
+    await image.tag(repository=repository)
+    await docker.images.push(repository=repository)
+
+
+@pytest.mark.asyncio
+async def test_delete_image(docker):
+    name = "alpine:latest"
+    repository = "localhost:5000/image"
+    image = await docker.images.get(name)
+    await image.tag(repository=repository)
+    assert await docker.images.get(repository)
+    await docker.images.remove(name=repository)
+    images = await docker.images.list(name=repository)
+    assert len(images) == 0
+
+
+@pytest.mark.asyncio
+async def test_not_existing_image(docker, random_name):
+    name = "{}:latest".format(random_name())
+    with pytest.raises(ImageNotFound) as excinfo:
+        await docker.images.get(name=name)
+    assert isinstance(excinfo.value, ImageNotFound)
+
+
+@pytest.mark.asyncio
+async def test_pull_image(docker):
+    name = "alpine:latest"
+    image = await docker.images.get(name=name)
+    assert image
+
+
+@pytest.mark.asyncio
+async def test_build_from_tar(docker, random_name):
+    name = "{}:latest".format(random_name())
+    dockerfile = '''
+    # Shared Volume
+    FROM alpine:latest
+    VOLUME /data
+    CMD ["/bin/sh"]
+    '''
+    f = BytesIO(dockerfile.encode('utf-8'))
+    tar_obj = utils.mktar_from_dockerfile(f)
+    await docker.images.build(fileobj=tar_obj, encoding="gzip", tag=name)
+    tar_obj.close()
+    image = await docker.images.get(name=name)
+    assert image
+
+
+@pytest.mark.asyncio
+async def test_push_image_auth(docker):
+    name = "alpine:latest"
+    await docker.images.pull(name)
+    image_obj = await docker.images.get(name=name)
+    repository = "localhost:5001/image:latest"
+    image, tag = repository.rsplit(':', 1)
+    await image_obj.tag(repository=image, tag=tag)
+
+    auth_config = {
+        'username': "testuser",
+        'password': "testpassword"
+    }
+
+    await docker.images.push(repository=repository, tag=tag, auth_config=auth_config)
+
+    await docker.images.remove(name=repository)
+    await docker.images.pull(repository,
+                             auth_config={
+                                 "auth": "dGVzdHVzZXI6dGVzdHBhc3N3b3Jk"
+                             })
+
+    await docker.images.get(repository)
+    await docker.images.remove(name=repository)
+
+    # Now compose_auth_header automatically parses and rebuilds
+    # the encoded value if required.
+    await docker.images.pull(repository,
+                             auth_config="dGVzdHVzZXI6dGVzdHBhc3N3b3Jk")
+    with pytest.raises(ValueError):
+        # The repository arg must include the registry address.
+        await docker.images.pull("image:latest",
+                                 auth_config={
+                                     "auth": "dGVzdHVzZXI6dGVzdHBhc3N3b3Jk"
+                                 })
+    await docker.images.pull(repository,
+                             auth_config={
+                                 "auth": "dGVzdHVzZXI6dGVzdHBhc3N3b3Jk"
+                             })
+    await docker.images.get(repository)
diff --git a/tests/new-api/test_system_api.py b/tests/new-api/test_system_api.py
new file mode 100644
index 00000000..93fa56ad
--- /dev/null
+++ b/tests/new-api/test_system_api.py
@@ -0,0 +1,8 @@
+import pytest
+
+
+@pytest.mark.asyncio
+async def test_system_info(docker):
+    docker_info = await docker.info()
+    assert 'ID' in docker_info
+    assert docker_info['ServerVersion'].startswith("1")
diff --git a/tests/test_system.py b/tests/test_system.py
index 3d190201..03c35b94 100644
--- a/tests/test_system.py
+++ b/tests/test_system.py
@@ -5,4 +5,4 @@
 async def test_system_info(docker):
     docker_info = await docker.system.info()
     assert 'ID' in docker_info
-    assert docker_info['ServerVersion'].startswith("17")
+    assert docker_info['ServerVersion'].startswith("1")