diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index 194c5a64..03b9f108 100644
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -68,7 +68,7 @@ def __del__(self):
             stats[name] = dur
             results.append({"name": name, "size": size, "duration": dur})

-        print("One worker, {}: ".format(size), stats)
+        print(f"One worker, {size}: {stats}")

     df = pd.DataFrame(results)
     df.to_csv("single_worker.csv", index=False, float_format="%.4f")
@@ -132,7 +132,7 @@ async def httpx_worker(q, done, s):
                 t.join()
             # print(stats)

-    async def test_asyncs_workers():
+    async def test_asyncs_workers(url, size, stats):
         for name, worker, SessionClass in [
             ("aiohttp", aiohttp_worker, aiohttp.ClientSession),
             ("httpx_async", httpx_worker, httpx.AsyncClient),
@@ -156,8 +156,8 @@ async def test_asyncs_workers():
             for w in workers:
                 w.cancel()

-    asyncio.run(test_asyncs_workers())
-    print("10 Workers, {}: ".format(size), stats)
+    asyncio.run(test_asyncs_workers(url, size, stats))
+    print(f"10 Workers, {size}: {stats}")

     df = pd.DataFrame(results)
     df.to_csv("multiple_workers.csv", index=False, float_format="%.4f")
diff --git a/curl_cffi/_asyncio_selector.py b/curl_cffi/_asyncio_selector.py
index 7143fea3..646c505b 100644
--- a/curl_cffi/_asyncio_selector.py
+++ b/curl_cffi/_asyncio_selector.py
@@ -15,6 +15,7 @@
 import socket
 import threading
 import typing
+from contextlib import suppress
 from typing import (
     Any,
     Callable,
@@ -48,10 +49,8 @@ def _atexit_callback() -> None:
         with loop._select_cond:
             loop._closing_selector = True
             loop._select_cond.notify()
-        try:
+        with suppress(BlockingIOError):
             loop._waker_w.send(b"a")
-        except BlockingIOError:
-            pass
         if loop._thread is not None:
             # If we don't join our (daemon) thread here, we may get a deadlock
             # during interpreter shutdown. I don't really understand why. This
@@ -152,16 +151,12 @@ async def _thread_manager(self) -> typing.AsyncGenerator[None, None]:
     def _wake_selector(self) -> None:
         if self._closed:
             return
-        try:
+        with suppress(BlockingIOError):
             self._waker_w.send(b"a")
-        except BlockingIOError:
-            pass

     def _consume_waker(self) -> None:
-        try:
+        with suppress(BlockingIOError):
             self._waker_r.recv(1024)
-        except BlockingIOError:
-            pass

     def _start_select(self) -> None:
         # Capture reader and writer sets here in the event loop
diff --git a/curl_cffi/aio.py b/curl_cffi/aio.py
index 27ac009d..3d6ca13b 100644
--- a/curl_cffi/aio.py
+++ b/curl_cffi/aio.py
@@ -36,7 +36,7 @@ def _get_selector(asyncio_loop) -> asyncio.AbstractEventLoop:
     if not isinstance(asyncio_loop, getattr(asyncio, "ProactorEventLoop", type(None))):
         return asyncio_loop

-    warnings.warn(PROACTOR_WARNING, RuntimeWarning)
+    warnings.warn(PROACTOR_WARNING, RuntimeWarning, stacklevel=2)

     from ._asyncio_selector import AddThreadSelectorEventLoop

@@ -201,7 +201,7 @@ def socket_action(self, sockfd: int, ev_bitmask: int) -> int:
     def process_data(self, sockfd: int, ev_bitmask: int):
         """Call curl_multi_info_read to read data for given socket."""
         if not self._curlm:
-            warnings.warn("Curlm alread closed! quitting from process_data")
+            warnings.warn("Curlm already closed! quitting from process_data", stacklevel=2)
             return

         self.socket_action(sockfd, ev_bitmask)
diff --git a/curl_cffi/curl.py b/curl_cffi/curl.py
index c91480b7..630bd91b 100644
--- a/curl_cffi/curl.py
+++ b/curl_cffi/curl.py
@@ -69,11 +69,11 @@ def write_callback(ptr, size, nmemb, userdata):
     callback = ffi.from_handle(userdata)
     wrote = callback(ffi.buffer(ptr, nmemb)[:])
     wrote = ensure_int(wrote)
-    if wrote == CURL_WRITEFUNC_PAUSE or wrote == CURL_WRITEFUNC_ERROR:
+    if wrote in (CURL_WRITEFUNC_PAUSE, CURL_WRITEFUNC_ERROR):
         return wrote  # should make this an exception in future versions
     if wrote != nmemb * size:
-        warnings.warn("Wrote bytes != received bytes.", RuntimeWarning)
+        warnings.warn("Wrote bytes != received bytes.", RuntimeWarning, stacklevel=2)
     return nmemb * size


@@ -101,7 +101,7 @@ def __init__(self, cacert: str = "", debug: bool = False, handle=None) -> None:
             debug: whether to show curl debug messages.
             handle: a curl handle instance from ``curl_easy_init``.
         """
-        self._curl = lib.curl_easy_init() if not handle else handle
+        self._curl = handle if handle else lib.curl_easy_init()
         self._headers = ffi.NULL
         self._proxy_headers = ffi.NULL
         self._resolve = ffi.NULL
@@ -118,7 +118,7 @@ def __init__(self, cacert: str = "", debug: bool = False, handle=None) -> None:
     def _set_error_buffer(self) -> None:
         ret = lib._curl_easy_setopt(self._curl, CurlOpt.ERRORBUFFER, self._error_buffer)
         if ret != 0:
-            warnings.warn("Failed to set error buffer")
+            warnings.warn("Failed to set error buffer", stacklevel=2)
         if self._debug:
             self.setopt(CurlOpt.VERBOSE, 1)
             lib._curl_easy_setopt(self._curl, CurlOpt.DEBUGFUNCTION, lib.debug_function)
@@ -189,10 +189,7 @@ def setopt(self, option: CurlOpt, value: Any) -> int:
             lib._curl_easy_setopt(self._curl, CurlOpt.WRITEFUNCTION, lib.write_callback)
             option = CurlOpt.HEADERDATA
         elif value_type == "char*":
-            if isinstance(value, str):
-                c_value = value.encode()
-            else:
-                c_value = value
+            c_value = value.encode() if isinstance(value, str) else value
             # Must keep a reference, otherwise may be GCed.
             if option == CurlOpt.POSTFIELDS:
                 self._body_handle = c_value
diff --git a/curl_cffi/requests/__init__.py b/curl_cffi/requests/__init__.py
index f24c4789..5e554149 100644
--- a/curl_cffi/requests/__init__.py
+++ b/curl_cffi/requests/__init__.py
@@ -72,8 +72,10 @@ def request(
         method: http method for the request: GET/POST/PUT/DELETE etc.
         url: url for the requests.
         params: query string for the requests.
-        data: form values or binary data to use in body, ``Content-Type: application/x-www-form-urlencoded`` will be added if a dict is given.
-        json: json values to use in body, `Content-Type: application/json` will be added automatically.
+        data: form values or binary data to use in body,
+            ``Content-Type: application/x-www-form-urlencoded`` will be added if a dict is given.
+        json: json values to use in body, ``Content-Type: application/json`` will be added
+            automatically.
         headers: headers to send.
         cookies: cookies to use.
         files: not supported, use ``multipart`` instead.
@@ -82,12 +84,14 @@ def request(
         allow_redirects: whether to allow redirection.
        max_redirects: max redirect counts, default unlimited(-1).
         proxies: dict of proxies to use, format: ``{"http": proxy_url, "https": proxy_url}``.
-        proxy: proxy to use, format: "http://user@pass:proxy_url". Cannot be used with the above parameter.
+        proxy: proxy to use, format: "http://user:pass@proxy_url".
+            Cannot be used with the proxies parameter.
         proxy_auth: HTTP basic auth for proxy, a tuple of (username, password).
         verify: whether to verify https certs.
         referer: shortcut for setting referer header.
         accept_encoding: shortcut for setting accept-encoding header.
-        content_callback: a callback function to receive response body. ``def callback(chunk: bytes) -> None:``
+        content_callback: a callback function to receive response body.
+            ``def callback(chunk: bytes) -> None:``
         impersonate: which browser version to impersonate.
         thread: work with other thread implementations. choices: eventlet, gevent.
         default_headers: whether to set default browser headers.
diff --git a/curl_cffi/requests/cookies.py b/curl_cffi/requests/cookies.py
index 65e13b4d..b9bcb875 100644
--- a/curl_cffi/requests/cookies.py
+++ b/curl_cffi/requests/cookies.py
@@ -70,7 +70,7 @@ def from_curl_format(cls, set_cookie_line: bytes):

     def to_curl_format(self):
         if not self.hostname:
-            raise RequestsError("Domain not found for cookie {}={}".format(self.name, self.value))
+            raise RequestsError(f"Domain not found for cookie {self.name}={self.value}")
         return "\t".join(
             [
                 self.hostname,
@@ -114,7 +114,7 @@ def to_cookiejar_cookie(self) -> Cookie:
             secure=self.secure,
             # using if explicitly to make it clear.
             expires=None if self.expires == 0 else self.expires,
-            discard=True if self.expires == 0 else False,
+            discard=self.expires == 0,
             comment=None,
             comment_url=None,
             rest=dict(http_only=f"{self.http_only}"),
@@ -193,12 +193,13 @@ def set(self, name: str, value: str, domain: str = "", path: str = "/", secure=F
         """
         # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie
         if name.startswith("__Secure-") and secure is False:
-            warnings.warn("`secure` changed to True for `__Secure-` prefixed cookies")
+            warnings.warn("`secure` changed to True for `__Secure-` prefixed cookies", stacklevel=2)
             secure = True
         elif name.startswith("__Host-") and (secure is False or domain or path != "/"):
             warnings.warn(
                 "`host` changed to True, `domain` removed, `path` changed to `/` "
-                "for `__Host-` prefixed cookies"
+                "for `__Host-` prefixed cookies",
+                stacklevel=2,
             )
             secure = True
             domain = ""
@@ -239,24 +240,26 @@ def get(  # type: ignore
         value = None
         matched_domain = ""
         for cookie in self.jar:
-            if cookie.name == name:
-                if domain is None or cookie.domain == domain:
-                    if path is None or cookie.path == path:
-                        # if cookies on two different domains do not share a same value
-                        if (
-                            value is not None
-                            and not matched_domain.endswith(cookie.domain)
-                            and not str(cookie.domain).endswith(matched_domain)
-                            and value != cookie.value
-                        ):
-                            message = (
-                                f"Multiple cookies exist with name={name} on "
-                                f"{matched_domain} and {cookie.domain}, add domain "
-                                "parameter to suppress this error."
-                            )
-                            raise CookieConflict(message)
-                        value = cookie.value
-                        matched_domain = cookie.domain or ""
+            if (
+                cookie.name == name
+                and (domain is None or cookie.domain == domain)
+                and (path is None or cookie.path == path)
+            ):
+                # if cookies on two different domains do not share the same value
+                if (
+                    value is not None
+                    and not matched_domain.endswith(cookie.domain)
+                    and not str(cookie.domain).endswith(matched_domain)
+                    and value != cookie.value
+                ):
+                    message = (
+                        f"Multiple cookies exist with name={name} on "
+                        f"{matched_domain} and {cookie.domain}, add domain "
+                        "parameter to suppress this error."
+                    )
+                    raise CookieConflict(message)
+                value = cookie.value
+                matched_domain = cookie.domain or ""

         if value is None:
             return default
diff --git a/curl_cffi/requests/headers.py b/curl_cffi/requests/headers.py
index 6a53a0e8..6e8149f7 100644
--- a/curl_cffi/requests/headers.py
+++ b/curl_cffi/requests/headers.py
@@ -80,10 +80,7 @@ def normalize_header_key(
     """
     Coerce str/bytes into a strictly byte-wise HTTP header key.
     """
-    if isinstance(value, bytes):
-        bytes_value = value
-    else:
-        bytes_value = value.encode(encoding or "ascii")
+    bytes_value = value if isinstance(value, bytes) else value.encode(encoding or "ascii")

     return bytes_value.lower() if lower else bytes_value

@@ -250,7 +247,7 @@ def get_list(self, key: str, split_commas: bool = False) -> List[str]:

     def update(self, headers: Optional[HeaderTypes] = None) -> None:  # type: ignore
         headers = Headers(headers)
-        for key in headers.keys():
+        for key in headers:
             if key in self:
                 self.pop(key)
         self._list.extend(headers._list)
diff --git a/curl_cffi/requests/models.py b/curl_cffi/requests/models.py
index 5615cbd9..7da491cf 100644
--- a/curl_cffi/requests/models.py
+++ b/curl_cffi/requests/models.py
@@ -44,7 +44,8 @@ class Response:
         elapsed: how many seconds the request cost.
         encoding: http body encoding.
         charset_encoding: encoding specified by the Content-Type header.
-        default_encoding: user-defined encoding used for decoding content if charset is not found in headers.
+        default_encoding: user-defined encoding used for decoding content if charset
+            is not found in headers.
         redirect_count: how many redirects happened.
         redirect_url: the final redirected url.
         http_version: http version used.
@@ -123,10 +124,7 @@ def iter_lines(self, chunk_size=None, decode_unicode=False, delimiter=None):
         for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
             if pending is not None:
                 chunk = pending + chunk
-            if delimiter:
-                lines = chunk.split(delimiter)
-            else:
-                lines = chunk.splitlines()
+            lines = chunk.split(delimiter) if delimiter else chunk.splitlines()
             if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                 pending = lines.pop()
             else:
@@ -142,7 +140,7 @@ def iter_content(self, chunk_size=None, decode_unicode=False):
         iterate streaming content chunk by chunk in bytes.
         """
         if chunk_size:
-            warnings.warn("chunk_size is ignored, there is no way to tell curl that.")
+            warnings.warn("chunk_size is ignored, there is no way to tell curl that.", stacklevel=2)

         if decode_unicode:
             raise NotImplementedError()
@@ -187,10 +185,7 @@ async def aiter_lines(self, chunk_size=None, decode_unicode=False, delimiter=Non
         async for chunk in self.aiter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
             if pending is not None:
                 chunk = pending + chunk
-            if delimiter:
-                lines = chunk.split(delimiter)
-            else:
-                lines = chunk.splitlines()
+            lines = chunk.split(delimiter) if delimiter else chunk.splitlines()
             if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                 pending = lines.pop()
             else:
@@ -207,7 +202,7 @@ async def aiter_content(self, chunk_size=None, decode_unicode=False):
         """
         iterate streaming content chunk by chunk in bytes.
""" if chunk_size: - warnings.warn("chunk_size is ignored, there is no way to tell curl that.") + warnings.warn("chunk_size is ignored, there is no way to tell curl that.", stacklevel=2) if decode_unicode: raise NotImplementedError() diff --git a/curl_cffi/requests/session.py b/curl_cffi/requests/session.py index 68c00dc5..4cd2eca5 100644 --- a/curl_cffi/requests/session.py +++ b/curl_cffi/requests/session.py @@ -4,7 +4,7 @@ import threading import warnings from concurrent.futures import ThreadPoolExecutor -from contextlib import asynccontextmanager, contextmanager +from contextlib import asynccontextmanager, contextmanager, suppress from enum import Enum from functools import partialmethod from io import BytesIO @@ -32,15 +32,11 @@ from .models import Request, Response from .websockets import WebSocket -try: +with suppress(ImportError): import gevent -except ImportError: - pass -try: +with suppress(ImportError): import eventlet.tpool -except ImportError: - pass if TYPE_CHECKING: @@ -85,14 +81,8 @@ def has(cls, item): @classmethod def normalize(cls, item): - if item == "chrome": - return cls.chrome - elif item == "safari": - return cls.safari - elif item == "safari_ios": - return cls.safari_ios - else: - return item + item_map = {"chrome": cls.chrome, "safari": cls.safari, "safari_ios": cls.safari_ios} + return item_map.get(item, item) class BrowserSpec: @@ -313,7 +303,8 @@ def _set_curl_options( # Tell libcurl to be aware of bodies and related headers when, # 1. POST/PUT/PATCH, even if the body is empty, it's up to curl to decide what to do; - # 2. GET/DELETE with body, although it's against the RFC, some applications. e.g. Elasticsearch, use this. + # 2. GET/DELETE with body, although it's against the RFC, some applications. + # e.g. Elasticsearch, use this. if body or method in ("POST", "PUT", "PATCH"): c.setopt(CurlOpt.POSTFIELDS, body) # necessary if body contains '\0' @@ -613,7 +604,8 @@ def __init__( cookies: cookies to add in the session. auth: HTTP basic auth, a tuple of (username, password), only basic auth is supported. proxies: dict of proxies to use, format: {"http": proxy_url, "https": proxy_url}. - proxy: proxy to use, format: "http://proxy_url". Cannot be used with the above parameter. + proxy: proxy to use, format: "http://proxy_url". + Cannot be used with the above parameter. proxy_auth: HTTP basic auth for proxy, a tuple of (username, password). base_url: absolute url to use for relative urls. params: query string for the session. @@ -624,8 +616,8 @@ def __init__( max_redirects: max redirect counts, default unlimited(-1). impersonate: which browser version to impersonate in the session. interface: which interface use in request to server. - default_encoding: encoding for decoding response content if charset is not found in headers. - Defaults to "utf-8". Can be set to a callable for automatic detection. + default_encoding: encoding for decoding response content if charset is not found in + headers. Defaults to "utf-8". Can be set to a callable for automatic detection. Notes: This class can be used as a context manager. 
@@ -657,7 +649,7 @@ def __init__(
     def curl(self):
         if self._use_thread_local_curl:
             if self._is_customized_curl:
-                warnings.warn("Creating fresh curl handle in different thread.")
+                warnings.warn("Creating fresh curl handle in different thread.", stacklevel=2)
             if not getattr(self._local, "curl", None):
                 self._local.curl = Curl(debug=self.debug)
             return self._local.curl
@@ -892,12 +884,14 @@ def __init__(
         Parameters:
             loop: loop to use, if not provided, the running loop will be used.
             async_curl: [AsyncCurl](/api/curl_cffi#curl_cffi.AsyncCurl) object to use.
-            max_clients: maxmium curl handle to use in the session, this will affect the concurrency ratio.
+            max_clients: maximum curl handles to use in the session,
+                this will affect the concurrency ratio.
             headers: headers to use in the session.
             cookies: cookies to add in the session.
             auth: HTTP basic auth, a tuple of (username, password), only basic auth is supported.
             proxies: dict of proxies to use, format: {"http": proxy_url, "https": proxy_url}.
-            proxy: proxy to use, format: "http://proxy_url". Cannot be used with the above parameter.
+            proxy: proxy to use, format: "http://proxy_url".
+                Cannot be used with the proxies parameter.
             proxy_auth: HTTP basic auth for proxy, a tuple of (username, password).
             base_url: absolute url to use for relative urls.
             params: query string for the session.
@@ -907,8 +901,8 @@ def __init__(
             allow_redirects: whether to allow redirection.
             max_redirects: max redirect counts, default unlimited(-1).
             impersonate: which browser version to impersonate in the session.
-            default_encoding: encoding for decoding response content if charset is not found in headers.
-                Defaults to "utf-8". Can be set to a callable for automatic detection.
+            default_encoding: encoding for decoding response content if charset is not found in
+                headers. Defaults to "utf-8". Can be set to a callable for automatic detection.

         Notes:
             This class can be used as a context manager, and it's recommended to use via
@@ -958,10 +952,8 @@ async def pop_curl(self):
         return curl

     def push_curl(self, curl):
-        try:
+        with suppress(asyncio.QueueFull):
             self.pool.put_nowait(curl)
-        except asyncio.QueueFull:
-            pass

     async def __aenter__(self):
         return self
diff --git a/curl_cffi/requests/websockets.py b/curl_cffi/requests/websockets.py
index c8a3594c..836ab536 100644
--- a/curl_cffi/requests/websockets.py
+++ b/curl_cffi/requests/websockets.py
@@ -109,10 +109,14 @@ def run_forever(self):
                 try:
                     code = struct.unpack_from("!H", msg)[0]
                     reason = msg[2:].decode()
-                except UnicodeDecodeError:
-                    raise WebSocketError("Invalid close message", WsCloseCode.INVALID_DATA)
-                except Exception:
-                    raise WebSocketError("Invalid close frame", WsCloseCode.PROTOCOL_ERROR)
+                except UnicodeDecodeError as e:
+                    raise WebSocketError(
+                        "Invalid close message", WsCloseCode.INVALID_DATA
+                    ) from e
+                except Exception as e:
+                    raise WebSocketError(
+                        "Invalid close frame", WsCloseCode.PROTOCOL_ERROR
+                    ) from e
                 else:
                     if code < 3000 and (code not in WsCloseCode or code == 1005):
                         raise WebSocketError(
diff --git a/examples/upload.py b/examples/upload.py
index 84e75063..ea36a66b 100644
--- a/examples/upload.py
+++ b/examples/upload.py
@@ -17,12 +17,15 @@
     local_path="./image.png",  # local file to upload
 )

+with open("./image.jpg", "rb") as file:
+    data = file.read()
+
 # you can add multiple files under the same field name
 mp.addpart(
     name="image",
     content_type="image/jpg",
     filename="image.jpg",
-    data=open("./image.jpg", "rb").read(),  # note the difference vs above
+    data=data,  # note the difference vs above
 )

 # from a list
diff --git a/pyproject.toml b/pyproject.toml
index 690c53da..d653ae6d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -115,8 +115,14 @@ asyncio_mode = "auto"
 line-length = 100

 [tool.ruff.lint]
-# Enable the isort rules.
-extend-select = ["I"] +select = [ + "E", # pycodestyle + "F", # Pyflakes + "UP", # pyupgrade + "B", # flake8-bugbear + "SIM", # flake8-simplify + "I", # isort +] [tool.mypy] python_version = "3.8" diff --git a/scripts/build.py b/scripts/build.py index b705a0d5..2c96e1d2 100644 --- a/scripts/build.py +++ b/scripts/build.py @@ -52,10 +52,8 @@ def download_libcurl(): return file = "libcurl-impersonate.tar.gz" - if arch["system"] == "Linux": - sysname = "linux-" + arch["libc"] - else: - sysname = arch["sysname"] + sysname = "linux-" + arch["libc"] if arch["system"] == "Linux" else arch["sysname"] + url = ( f"https://github.com/yifeikong/curl-impersonate/releases/download/" f"v{__version__}/libcurl-impersonate-v{__version__}" @@ -94,9 +92,9 @@ def get_curl_archives(): def get_curl_libraries(): if arch["system"] == "Windows": return ["libcurl"] - elif arch["system"] == "Darwin": - return ["curl-impersonate-chrome"] - elif arch["system"] == "Linux" and arch.get("link_type") == "dynamic": + elif arch["system"] == "Darwin" or ( + arch["system"] == "Linux" and arch.get("link_type") == "dynamic" + ): return ["curl-impersonate-chrome"] else: return [] diff --git a/scripts/generate_consts.py b/scripts/generate_consts.py index d807149a..ba629196 100644 --- a/scripts/generate_consts.py +++ b/scripts/generate_consts.py @@ -15,7 +15,7 @@ f.write("class CurlOpt(IntEnum):\n") cmd = rf""" echo '#include "{CURL_VERSION}/include/curl/curl.h"' | gcc -E - | grep -i "CURLOPT_.\+ =" | sed "s/ CURLOPT_/ /g" | sed "s/,//g" - """ + """ # noqa E501 output = subprocess.check_output(cmd, shell=True) f.write(output.decode()) f.write( @@ -32,7 +32,7 @@ f.write("class CurlInfo(IntEnum):\n") cmd = rf""" echo '#include "{CURL_VERSION}/include/curl/curl.h"' | gcc -E - | grep -i "CURLINFO_.\+ =" | sed "s/ CURLINFO_/ /g" | sed "s/,//g" - """ + """ # noqa E501 output = subprocess.check_output(cmd, shell=True) f.write(output.decode()) f.write( @@ -45,7 +45,7 @@ f.write("class CurlMOpt(IntEnum):\n") cmd = rf""" echo '#include "{CURL_VERSION}/include/curl/curl.h"' | gcc -E - | grep -i "CURLMOPT_.\+ =" | sed "s/ CURLMOPT_/ /g" | sed "s/,//g" - """ + """ # noqa E501 output = subprocess.check_output(cmd, shell=True) f.write(output.decode()) f.write("\n\n") @@ -53,7 +53,7 @@ f.write("class CurlECode(IntEnum):\n") cmd = rf""" echo '#include "{CURL_VERSION}/include/curl/curl.h"' | gcc -E - | grep -i CURLE_ | sed "s/[, ][=0]*//g" | sed "s/CURLE_/ /g" | awk '{{print $0 " = " NR-1}}' - """ + """ # noqa E501 output = subprocess.check_output(cmd, shell=True) f.write(output.decode()) f.write("\n\n") diff --git a/tests/unittest/test_async_session.py b/tests/unittest/test_async_session.py index a060e851..fa9a0d23 100644 --- a/tests/unittest/test_async_session.py +++ b/tests/unittest/test_async_session.py @@ -1,6 +1,7 @@ import asyncio import base64 import json +from contextlib import suppress import pytest @@ -310,10 +311,8 @@ async def test_post_body_cleaned(server): async def test_timers_leak(server): async with AsyncSession() as sess: for _ in range(3): - try: + with suppress(Exception): await sess.get(str(server.url.copy_with(path="/slow_response")), timeout=0.1) - except Exception: - pass await asyncio.sleep(0.2) assert len(sess.acurl._timers) == 0 diff --git a/tests/unittest/test_requests.py b/tests/unittest/test_requests.py index 57a7165b..ec388f0c 100644 --- a/tests/unittest/test_requests.py +++ b/tests/unittest/test_requests.py @@ -567,10 +567,9 @@ def test_stream_empty_body(server): def test_stream_incomplete_read(server): with 
requests.Session() as s: url = str(server.url.copy_with(path="/incomplete_read")) - with pytest.raises(requests.RequestsError) as e: - with s.stream("GET", url) as r: - for _ in r.iter_content(): - continue + with pytest.raises(requests.RequestsError) as e, s.stream("GET", url) as r: + for _ in r.iter_content(): + continue assert e.value.code == CurlECode.PARTIAL_FILE @@ -590,9 +589,8 @@ def test_stream_incomplete_read_without_close(server): def test_stream_redirect_loop(server): with requests.Session() as s: url = str(server.url.copy_with(path="/redirect_loop")) - with pytest.raises(requests.RequestsError) as e: - with s.stream("GET", url, max_redirects=2): - pass + with pytest.raises(requests.RequestsError) as e, s.stream("GET", url, max_redirects=2): + pass assert e.value.code == CurlECode.TOO_MANY_REDIRECTS assert isinstance(e.value.response, Response) assert e.value.response.status_code == 301 @@ -665,11 +663,9 @@ def test_stream_close_early(server): # from http://xcal1.vodafone.co.uk/ url = "http://212.183.159.230/200MB.zip" r = s.get(url, max_recv_speed=1024 * 1024, stream=True) - counter = 0 start = time.time() - for _ in r.iter_content(): - counter += 1 - if counter > 10: + for i, _ in enumerate(r.iter_content()): + if i > 10: break r.close() end = time.time()
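
A note on the two patterns this patch applies throughout. First, `with suppress(SomeError): ...` from the standard-library `contextlib` module behaves identically to the `try: ... except SomeError: pass` blocks it replaces. Second, the `stacklevel=2` argument added to the `warnings.warn()` calls changes which source line the warning is attributed to. A minimal standalone sketch (illustration only, not part of the patch; `library_helper` is a hypothetical name):

    import warnings

    def library_helper():
        # Default stacklevel=1: the warning is attributed to this line,
        # deep inside the library, which tells the user little.
        warnings.warn("default attribution", RuntimeWarning)
        # stacklevel=2: the warning is attributed to the caller's line,
        # which is the convention this patch adopts.
        warnings.warn("caller attribution", RuntimeWarning, stacklevel=2)

    library_helper()  # the second warning points at this line

Run with `python -W always sketch.py` to see both warnings and compare the reported locations.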