From 500adb6658cdfd50d3b0f640e10c18be497aa759 Mon Sep 17 00:00:00 2001
From: John Kurkowski
Date: Wed, 29 Dec 2021 17:33:52 -0800
Subject: [PATCH] Format

---
 tests/custom_suffix_test.py |  40 ++---
 tests/integration_test.py   |   2 +-
 tests/main_test.py          | 314 ++++++++++++++++++++++--------------
 tests/test_cache.py         |  23 ++-
 tests/test_parallel.py      |  14 +-
 tox.ini                     |   3 +-
 6 files changed, 232 insertions(+), 164 deletions(-)

diff --git a/tests/custom_suffix_test.py b/tests/custom_suffix_test.py
index f0621932..8258a53d 100644
--- a/tests/custom_suffix_test.py
+++ b/tests/custom_suffix_test.py
@@ -1,4 +1,4 @@
-'''tldextract unit tests with a custom suffix list.'''
+"""tldextract unit tests with a custom suffix list."""
 
 import os
 import tempfile
@@ -6,45 +6,49 @@
 import tldextract
 
 FAKE_SUFFIX_LIST_URL = "file://" + os.path.join(
-    os.path.dirname(os.path.abspath(__file__)),
-    'fixtures/fake_suffix_list_fixture.dat'
+    os.path.dirname(os.path.abspath(__file__)), "fixtures/fake_suffix_list_fixture.dat"
 )
-EXTRA_SUFFIXES = ['foo1', 'bar1', 'baz1']
+EXTRA_SUFFIXES = ["foo1", "bar1", "baz1"]
 
 extract_using_fake_suffix_list = tldextract.TLDExtract(
-    cache_dir=tempfile.mkdtemp(),
-    suffix_list_urls=[FAKE_SUFFIX_LIST_URL]
+    cache_dir=tempfile.mkdtemp(), suffix_list_urls=[FAKE_SUFFIX_LIST_URL]
 )
 extract_using_fake_suffix_list_no_cache = tldextract.TLDExtract(
-    cache_dir=None,
-    suffix_list_urls=[FAKE_SUFFIX_LIST_URL]
+    cache_dir=None, suffix_list_urls=[FAKE_SUFFIX_LIST_URL]
 )
 extract_using_extra_suffixes = tldextract.TLDExtract(
     cache_dir=None,
     suffix_list_urls=[FAKE_SUFFIX_LIST_URL],
-    extra_suffixes=EXTRA_SUFFIXES
+    extra_suffixes=EXTRA_SUFFIXES,
 )
 
 
 def test_private_extraction():
-    tld = tldextract.TLDExtract(
-        cache_dir=tempfile.mkdtemp(),
-        suffix_list_urls=[]
-    )
+    tld = tldextract.TLDExtract(cache_dir=tempfile.mkdtemp(), suffix_list_urls=[])
 
-    assert tld("foo.blogspot.com") == ('foo', 'blogspot', 'com')
-    assert tld("foo.blogspot.com", include_psl_private_domains=True) == ('', 'foo', 'blogspot.com')
+    assert tld("foo.blogspot.com") == ("foo", "blogspot", "com")
+    assert tld("foo.blogspot.com", include_psl_private_domains=True) == (
+        "",
+        "foo",
+        "blogspot.com",
+    )
 
 
 def test_suffix_which_is_not_in_custom_list():
-    for fun in (extract_using_fake_suffix_list, extract_using_fake_suffix_list_no_cache):
+    for fun in (
+        extract_using_fake_suffix_list,
+        extract_using_fake_suffix_list_no_cache,
+    ):
         result = fun("www.google.com")
         assert result.suffix == ""
 
 
 def test_custom_suffixes():
-    for fun in (extract_using_fake_suffix_list, extract_using_fake_suffix_list_no_cache):
-        for custom_suffix in ('foo', 'bar', 'baz'):
+    for fun in (
+        extract_using_fake_suffix_list,
+        extract_using_fake_suffix_list_no_cache,
+    ):
+        for custom_suffix in ("foo", "bar", "baz"):
             result = fun("www.foo.bar.baz.quux" + "." + custom_suffix)
             assert result.suffix == custom_suffix
diff --git a/tests/integration_test.py b/tests/integration_test.py
index d017890e..d95f26ac 100644
--- a/tests/integration_test.py
+++ b/tests/integration_test.py
@@ -1,4 +1,4 @@
-'''tldextract integration tests.'''
+"""tldextract integration tests."""
 
 import pytest
 
diff --git a/tests/main_test.py b/tests/main_test.py
index dc772f70..7f13671f 100644
--- a/tests/main_test.py
+++ b/tests/main_test.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-'''Main tldextract unit tests.'''
+"""Main tldextract unit tests."""
 
 import logging
 import os
@@ -12,32 +12,35 @@
 from tldextract.suffix_list import SuffixListNotFound
 from tldextract.tldextract import ExtractResult
 
-
 extract = tldextract.TLDExtract(cache_dir=tempfile.mkdtemp())
 extract_no_cache = tldextract.TLDExtract(cache_dir=False)
-extract_using_real_local_suffix_list = tldextract.TLDExtract(cache_dir=tempfile.mkdtemp())
+extract_using_real_local_suffix_list = tldextract.TLDExtract(
+    cache_dir=tempfile.mkdtemp()
+)
 extract_using_real_local_suffix_list_no_cache = tldextract.TLDExtract(cache_dir=False)
 extract_using_fallback_to_snapshot_no_cache = tldextract.TLDExtract(
-    cache_dir=None,
-    suffix_list_urls=None
+    cache_dir=None, suffix_list_urls=None
 )
 
 
 def assert_extract(  # pylint: disable=missing-docstring
-        url,
-        expected_domain_data,
-        expected_ip_data='',
-        funs=(
-            extract,
-            extract_no_cache,
-            extract_using_real_local_suffix_list,
-            extract_using_real_local_suffix_list_no_cache,
-            extract_using_fallback_to_snapshot_no_cache
-        )):
-    (expected_fqdn,
-     expected_subdomain,
-     expected_domain,
-     expected_tld) = expected_domain_data
+    url,
+    expected_domain_data,
+    expected_ip_data="",
+    funs=(
+        extract,
+        extract_no_cache,
+        extract_using_real_local_suffix_list,
+        extract_using_real_local_suffix_list_no_cache,
+        extract_using_fallback_to_snapshot_no_cache,
+    ),
+):
+    (
+        expected_fqdn,
+        expected_subdomain,
+        expected_domain,
+        expected_tld,
+    ) = expected_domain_data
     for fun in funs:
         ext = fun(url)
         assert expected_fqdn == ext.fqdn
@@ -48,13 +51,14 @@
 
 
 def test_american():
-    assert_extract('http://www.google.com',
-                   ('www.google.com', 'www', 'google', 'com'))
+    assert_extract("http://www.google.com", ("www.google.com", "www", "google", "com"))
 
 
 def test_british():
-    assert_extract("http://www.theregister.co.uk",
-                   ("www.theregister.co.uk", "www", "theregister", "co.uk"))
+    assert_extract(
+        "http://www.theregister.co.uk",
+        ("www.theregister.co.uk", "www", "theregister", "co.uk"),
+    )
 
 
 def test_no_subdomain():
@@ -62,177 +66,244 @@
 
 
 def test_nested_subdomain():
-    assert_extract("http://media.forums.theregister.co.uk",
-                   ("media.forums.theregister.co.uk", "media.forums",
-                    "theregister", "co.uk"))
+    assert_extract(
+        "http://media.forums.theregister.co.uk",
+        ("media.forums.theregister.co.uk", "media.forums", "theregister", "co.uk"),
+    )
 
 
 def test_odd_but_possible():
-    assert_extract('http://www.www.com', ('www.www.com', 'www', 'www', 'com'))
-    assert_extract('http://www.com', ('www.com', '', 'www', 'com'))
+    assert_extract("http://www.www.com", ("www.www.com", "www", "www", "com"))
+    assert_extract("http://www.com", ("www.com", "", "www", "com"))
 
 
 def test_suffix():
-    assert_extract('com', ('', '', '', 'com'))
-    assert_extract('co.uk', ('', '', '', 'co.uk'))
+    assert_extract("com", ("", "", "", "com"))
+    assert_extract("co.uk", ("", "", "", "co.uk"))
 
 
 def test_local_host():
-    assert_extract('http://internalunlikelyhostname/',
-                   ('', '', 'internalunlikelyhostname', ''))
-    assert_extract('http://internalunlikelyhostname.bizarre',
-                   ('', 'internalunlikelyhostname', 'bizarre', ''))
+    assert_extract(
+        "http://internalunlikelyhostname/", ("", "", "internalunlikelyhostname", "")
+    )
+    assert_extract(
+        "http://internalunlikelyhostname.bizarre",
+        ("", "internalunlikelyhostname", "bizarre", ""),
+    )
 
 
 def test_qualified_local_host():
-    assert_extract('http://internalunlikelyhostname.info/',
-                   ('internalunlikelyhostname.info',
-                    '', 'internalunlikelyhostname', 'info'))
-    assert_extract('http://internalunlikelyhostname.information/',
-                   ('',
-                    'internalunlikelyhostname', 'information', ''))
+    assert_extract(
+        "http://internalunlikelyhostname.info/",
+        ("internalunlikelyhostname.info", "", "internalunlikelyhostname", "info"),
+    )
+    assert_extract(
+        "http://internalunlikelyhostname.information/",
+        ("", "internalunlikelyhostname", "information", ""),
+    )
 
 
 def test_ip():
-    assert_extract('http://216.22.0.192/',
-                   ('', '', '216.22.0.192', ''),
-                   expected_ip_data='216.22.0.192',)
-    assert_extract('http://216.22.project.coop/',
-                   ('216.22.project.coop', '216.22', 'project', 'coop'))
+    assert_extract(
+        "http://216.22.0.192/",
+        ("", "", "216.22.0.192", ""),
+        expected_ip_data="216.22.0.192",
+    )
+    assert_extract(
+        "http://216.22.project.coop/",
+        ("216.22.project.coop", "216.22", "project", "coop"),
+    )
 
 
 def test_looks_like_ip():
-    assert_extract('1\xe9', ('', '', '1\xe9', ''))
+    assert_extract("1\xe9", ("", "", "1\xe9", ""))
 
 
 def test_punycode():
-    assert_extract('http://xn--h1alffa9f.xn--p1ai',
-                   ('xn--h1alffa9f.xn--p1ai', '', 'xn--h1alffa9f', 'xn--p1ai'))
-    assert_extract('http://xN--h1alffa9f.xn--p1ai',
-                   ('xN--h1alffa9f.xn--p1ai', '', 'xN--h1alffa9f', 'xn--p1ai'))
-    assert_extract('http://XN--h1alffa9f.xn--p1ai',
-                   ('XN--h1alffa9f.xn--p1ai', '', 'XN--h1alffa9f', 'xn--p1ai'))
+    assert_extract(
+        "http://xn--h1alffa9f.xn--p1ai",
+        ("xn--h1alffa9f.xn--p1ai", "", "xn--h1alffa9f", "xn--p1ai"),
+    )
+    assert_extract(
+        "http://xN--h1alffa9f.xn--p1ai",
+        ("xN--h1alffa9f.xn--p1ai", "", "xN--h1alffa9f", "xn--p1ai"),
+    )
+    assert_extract(
+        "http://XN--h1alffa9f.xn--p1ai",
+        ("XN--h1alffa9f.xn--p1ai", "", "XN--h1alffa9f", "xn--p1ai"),
+    )
     # Entries that might generate UnicodeError exception
     # This subdomain generates UnicodeError 'IDNA does not round-trip'
-    assert_extract('xn--tub-1m9d15sfkkhsifsbqygyujjrw602gk4li5qqk98aca0w.google.com',
-                   ('xn--tub-1m9d15sfkkhsifsbqygyujjrw602gk4li5qqk98aca0w.google.com',
-                    'xn--tub-1m9d15sfkkhsifsbqygyujjrw602gk4li5qqk98aca0w', 'google',
-                    'com'))
+    assert_extract(
+        "xn--tub-1m9d15sfkkhsifsbqygyujjrw602gk4li5qqk98aca0w.google.com",
+        (
+            "xn--tub-1m9d15sfkkhsifsbqygyujjrw602gk4li5qqk98aca0w.google.com",
+            "xn--tub-1m9d15sfkkhsifsbqygyujjrw602gk4li5qqk98aca0w",
+            "google",
+            "com",
+        ),
+    )
     # This subdomain generates UnicodeError 'incomplete punicode string'
-    assert_extract('xn--tub-1m9d15sfkkhsifsbqygyujjrw60.google.com',
-                   ('xn--tub-1m9d15sfkkhsifsbqygyujjrw60.google.com',
-                    'xn--tub-1m9d15sfkkhsifsbqygyujjrw60', 'google', 'com'))
+    assert_extract(
+        "xn--tub-1m9d15sfkkhsifsbqygyujjrw60.google.com",
+        (
+            "xn--tub-1m9d15sfkkhsifsbqygyujjrw60.google.com",
+            "xn--tub-1m9d15sfkkhsifsbqygyujjrw60",
+            "google",
+            "com",
+        ),
+    )
 
 
 def test_invalid_puny_with_puny():
-    assert_extract('http://xn--zckzap6140b352by.blog.so-net.xn--wcvs22d.hk',
-                   ('xn--zckzap6140b352by.blog.so-net.xn--wcvs22d.hk',
-                    'xn--zckzap6140b352by.blog', 'so-net', 'xn--wcvs22d.hk'))
-    assert_extract('http://xn--&.so-net.com',
-                   ('xn--&.so-net.com',
-                    'xn--&', 'so-net', 'com'))
+    assert_extract(
+        "http://xn--zckzap6140b352by.blog.so-net.xn--wcvs22d.hk",
+        (
+            "xn--zckzap6140b352by.blog.so-net.xn--wcvs22d.hk",
+            "xn--zckzap6140b352by.blog",
+            "so-net",
+            "xn--wcvs22d.hk",
+        ),
+    )
+    assert_extract(
+        "http://xn--&.so-net.com", ("xn--&.so-net.com", "xn--&", "so-net", "com")
+    )
 
 
 def test_puny_with_non_puny():
-    assert_extract('http://xn--zckzap6140b352by.blog.so-net.教育.hk',
-                   ('xn--zckzap6140b352by.blog.so-net.教育.hk',
-                    'xn--zckzap6140b352by.blog', 'so-net', '教育.hk'))
+    assert_extract(
+        "http://xn--zckzap6140b352by.blog.so-net.教育.hk",
+        (
+            "xn--zckzap6140b352by.blog.so-net.教育.hk",
+            "xn--zckzap6140b352by.blog",
+            "so-net",
+            "教育.hk",
+        ),
+    )
 
 
 def test_idna_2008():
     """Python supports IDNA 2003.
     The IDNA library adds 2008 support for characters like ß.
     """
-    assert_extract('xn--gieen46ers-73a.de',
-                   ('xn--gieen46ers-73a.de', '', 'xn--gieen46ers-73a', 'de'))
+    assert_extract(
+        "xn--gieen46ers-73a.de",
+        ("xn--gieen46ers-73a.de", "", "xn--gieen46ers-73a", "de"),
+    )
 
 
 def test_empty():
-    assert_extract('http://', ('', '', '', ''))
+    assert_extract("http://", ("", "", "", ""))
 
 
 def test_scheme():
-    assert_extract('https://mail.google.com/mail', ('mail.google.com', 'mail', 'google', 'com'))
-    assert_extract('ssh://mail.google.com/mail', ('mail.google.com', 'mail', 'google', 'com'))
-    assert_extract('//mail.google.com/mail', ('mail.google.com', 'mail', 'google', 'com'))
-    assert_extract('mail.google.com/mail',
-                   ('mail.google.com', 'mail', 'google', 'com'), funs=(extract,))
+    assert_extract(
+        "https://mail.google.com/mail", ("mail.google.com", "mail", "google", "com")
+    )
+    assert_extract(
+        "ssh://mail.google.com/mail", ("mail.google.com", "mail", "google", "com")
+    )
+    assert_extract(
+        "//mail.google.com/mail", ("mail.google.com", "mail", "google", "com")
+    )
+    assert_extract(
+        "mail.google.com/mail",
+        ("mail.google.com", "mail", "google", "com"),
+        funs=(extract,),
+    )
 
 
 def test_port():
-    assert_extract('git+ssh://www.github.com:8443/', ('www.github.com', 'www', 'github', 'com'))
+    assert_extract(
+        "git+ssh://www.github.com:8443/", ("www.github.com", "www", "github", "com")
+    )
 
 
 def test_username():
-    assert_extract('ftp://johndoe:5cr1p7k1dd13@1337.warez.com:2501',
-                   ('1337.warez.com', '1337', 'warez', 'com'))
+    assert_extract(
+        "ftp://johndoe:5cr1p7k1dd13@1337.warez.com:2501",
+        ("1337.warez.com", "1337", "warez", "com"),
+    )
 
 
 def test_query_fragment():
-    assert_extract('http://google.com?q=cats', ('google.com', '', 'google', 'com'))
-    assert_extract('http://google.com#Welcome', ('google.com', '', 'google', 'com'))
-    assert_extract('http://google.com/#Welcome', ('google.com', '', 'google', 'com'))
-    assert_extract('http://google.com/s#Welcome', ('google.com', '', 'google', 'com'))
-    assert_extract('http://google.com/s?q=cats#Welcome', ('google.com', '', 'google', 'com'))
+    assert_extract("http://google.com?q=cats", ("google.com", "", "google", "com"))
+    assert_extract("http://google.com#Welcome", ("google.com", "", "google", "com"))
+    assert_extract("http://google.com/#Welcome", ("google.com", "", "google", "com"))
+    assert_extract("http://google.com/s#Welcome", ("google.com", "", "google", "com"))
+    assert_extract(
+        "http://google.com/s?q=cats#Welcome", ("google.com", "", "google", "com")
+    )
 
 
 def test_regex_order():
-    assert_extract('http://www.parliament.uk',
-                   ('www.parliament.uk', 'www', 'parliament', 'uk'))
-    assert_extract('http://www.parliament.co.uk',
-                   ('www.parliament.co.uk', 'www', 'parliament', 'co.uk'))
+    assert_extract(
+        "http://www.parliament.uk", ("www.parliament.uk", "www", "parliament", "uk")
+    )
+    assert_extract(
+        "http://www.parliament.co.uk",
+        ("www.parliament.co.uk", "www", "parliament", "co.uk"),
+    )
 
 
 def test_unhandled_by_iana():
-    assert_extract('http://www.cgs.act.edu.au/',
-                   ('www.cgs.act.edu.au', 'www', 'cgs', 'act.edu.au'))
-    assert_extract('http://www.google.com.au/',
-                   ('www.google.com.au', 'www', 'google', 'com.au'))
+    assert_extract(
+        "http://www.cgs.act.edu.au/", ("www.cgs.act.edu.au", "www", "cgs", "act.edu.au")
+    )
+    assert_extract(
+        "http://www.google.com.au/", ("www.google.com.au", "www", "google", "com.au")
+    )
 
 
 def test_tld_is_a_website_too():
-    assert_extract('http://www.metp.net.cn', ('www.metp.net.cn', 'www', 'metp', 'net.cn'))
+    assert_extract(
+        "http://www.metp.net.cn", ("www.metp.net.cn", "www", "metp", "net.cn")
+    )
     # This is unhandled by the PSL. Or is it?
     # assert_extract(http://www.net.cn',
     #                ('www.net.cn', 'www', 'net', 'cn'))
 
 
 def test_dns_root_label():
-    assert_extract('http://www.example.com./',
-                   ('www.example.com', 'www', 'example', 'com'))
+    assert_extract(
+        "http://www.example.com./", ("www.example.com", "www", "example", "com")
+    )
 
 
 def test_private_domains():
-    assert_extract('http://waiterrant.blogspot.com',
-                   ('waiterrant.blogspot.com', 'waiterrant', 'blogspot', 'com'))
+    assert_extract(
+        "http://waiterrant.blogspot.com",
+        ("waiterrant.blogspot.com", "waiterrant", "blogspot", "com"),
+    )
 
 
 def test_ipv4():
-    assert_extract('http://127.0.0.1/foo/bar',
-                   ('', '', '127.0.0.1', ''),
-                   expected_ip_data='127.0.0.1')
+    assert_extract(
+        "http://127.0.0.1/foo/bar",
+        ("", "", "127.0.0.1", ""),
+        expected_ip_data="127.0.0.1",
+    )
 
 
 def test_ipv4_bad():
-    assert_extract('http://256.256.256.256/foo/bar',
-                   ('', '256.256.256', '256', ''),
-                   expected_ip_data='')
+    assert_extract(
+        "http://256.256.256.256/foo/bar",
+        ("", "256.256.256", "256", ""),
+        expected_ip_data="",
+    )
 
 
 def test_ipv4_lookalike():
-    assert_extract('http://127.0.0.1.9/foo/bar',
-                   ('', '127.0.0.1', '9', ''),
-                   expected_ip_data='')
+    assert_extract(
+        "http://127.0.0.1.9/foo/bar", ("", "127.0.0.1", "9", ""), expected_ip_data=""
+    )
 
 
 def test_result_as_dict():
     result = extract(
-        "http://admin:password1@www.google.com:666"
-        "/secret/admin/interface?param1=42"
+        "http://admin:password1@www.google.com:666" "/secret/admin/interface?param1=42"
     )
-    expected_dict = {'subdomain': 'www',
-                     'domain': 'google',
-                     'suffix': 'com'}
+    expected_dict = {"subdomain": "www", "domain": "google", "suffix": "com"}
     assert result._asdict() == expected_dict
@@ -263,12 +334,8 @@ def no_permission_makedirs(*args, **kwargs):
 
 @responses.activate
 def test_cache_timeouts(tmpdir):
-    server = 'http://some-server.com'
-    responses.add(
-        responses.GET,
-        server,
-        status=408
-    )
+    server = "http://some-server.com"
+    responses.add(responses.GET, server, status=408)
 
     cache = DiskCache(tmpdir)
     with pytest.raises(SuffixListNotFound):
@@ -277,14 +344,10 @@
 
 
 def test_tlds_property():
    extract_private = tldextract.TLDExtract(
-        cache_dir=None,
-        suffix_list_urls=None,
-        include_psl_private_domains=True
+        cache_dir=None, suffix_list_urls=None, include_psl_private_domains=True
     )
     extract_public = tldextract.TLDExtract(
-        cache_dir=None,
-        suffix_list_urls=None,
-        include_psl_private_domains=False
+        cache_dir=None, suffix_list_urls=None, include_psl_private_domains=False
     )
     assert len(extract_private.tlds) > len(extract_public.tlds)
@@ -293,5 +356,6 @@ def test_global_extract():
     assert tldextract.extract("foo.blogspot.com") == ExtractResult(
         subdomain="foo", domain="blogspot", suffix="com"
     )
-    assert tldextract.extract("foo.blogspot.com", include_psl_private_domains=True) == \
-        ExtractResult(subdomain='', domain='foo', suffix='blogspot.com')
+    assert tldextract.extract(
+        "foo.blogspot.com", include_psl_private_domains=True
+    ) == ExtractResult(subdomain="", domain="foo", suffix="blogspot.com")
diff --git a/tests/test_cache.py b/tests/test_cache.py
index b148ae15..78883e26 100644
--- a/tests/test_cache.py
+++ b/tests/test_cache.py
@@ -4,9 +4,9 @@ import types
 
 import pytest
 
-
 import tldextract.cache
-from tldextract.cache import DiskCache, get_pkg_unique_identifier, get_cache_dir
+from tldextract.cache import (DiskCache, get_cache_dir,
+                              get_pkg_unique_identifier)
 
 
 def test_disk_cache(tmpdir):
@@ -27,16 +27,21 @@ def test_get_pkg_unique_identifier(monkeypatch):
     monkeypatch.setattr(sys, "version_info", (3, 8, 1, "final", 0))
     monkeypatch.setattr(sys, "prefix", "/home/john/.pyenv/versions/myvirtualenv")
 
-    mock_version_module = types.ModuleType('tldextract._version', 'mocked module')
+    mock_version_module = types.ModuleType("tldextract._version", "mocked module")
     mock_version_module.version = "1.2.3"
     monkeypatch.setitem(sys.modules, "tldextract._version", mock_version_module)
 
-    assert get_pkg_unique_identifier() == "3.8.1.final__myvirtualenv__f01a7b__tldextract-1.2.3"
+    assert (
+        get_pkg_unique_identifier()
+        == "3.8.1.final__myvirtualenv__f01a7b__tldextract-1.2.3"
+    )
 
 
 def test_get_cache_dir(monkeypatch):
     pkg_identifier = "3.8.1.final__myvirtualenv__f01a7b__tldextract-1.2.3"
-    monkeypatch.setattr(tldextract.cache, "get_pkg_unique_identifier", lambda: pkg_identifier)
+    monkeypatch.setattr(
+        tldextract.cache, "get_pkg_unique_identifier", lambda: pkg_identifier
+    )
 
     # with no HOME set, fallback to attempting to use package directory itself
     monkeypatch.delenv("HOME", raising=False)
@@ -48,14 +53,18 @@
 
     monkeypatch.setenv("HOME", "/home/john")
     monkeypatch.delenv("XDG_CACHE_HOME", raising=False)
     monkeypatch.delenv("TLDEXTRACT_CACHE", raising=False)
-    assert get_cache_dir() == os.path.join("/home/john", ".cache/python-tldextract", pkg_identifier)
+    assert get_cache_dir() == os.path.join(
+        "/home/john", ".cache/python-tldextract", pkg_identifier
+    )
 
     # if XDG_CACHE_HOME is set, use it
     monkeypatch.setenv("HOME", "/home/john")
     monkeypatch.setenv("XDG_CACHE_HOME", "/my/alt/cache")
     monkeypatch.delenv("TLDEXTRACT_CACHE", raising=False)
-    assert get_cache_dir() == os.path.join("/my/alt/cache/python-tldextract", pkg_identifier)
+    assert get_cache_dir() == os.path.join(
+        "/my/alt/cache/python-tldextract", pkg_identifier
+    )
 
     # if TLDEXTRACT_CACHE is set, use it
     monkeypatch.setenv("HOME", "/home/john")
diff --git a/tests/test_parallel.py b/tests/test_parallel.py
index 680c2263..ed3eb5d5 100644
--- a/tests/test_parallel.py
+++ b/tests/test_parallel.py
@@ -20,12 +20,7 @@ def test_multiprocessing_makes_one_request(tmpdir):
 @responses.activate
 def _run_extractor(cache_dir):
     """run the extractor"""
-    responses.add(
-        responses.GET,
-        PUBLIC_SUFFIX_LIST_URLS[0],
-        status=208,
-        body="uk.co"
-    )
+    responses.add(responses.GET, PUBLIC_SUFFIX_LIST_URLS[0], status=208, body="uk.co")
 
     extract = TLDExtract(cache_dir=cache_dir)
     extract("bar.uk.com", include_psl_private_domains=True)
@@ -35,12 +30,7 @@ def _run_extractor(cache_dir):
 @responses.activate
 def test_cache_cleared_by_other_process(tmpdir, monkeypatch):
     """Simulate a file being deleted after we check for existence but before we try to delete it"""
-    responses.add(
-        responses.GET,
-        PUBLIC_SUFFIX_LIST_URLS[0],
-        status=208,
-        body="uk.com"
-    )
+    responses.add(responses.GET, PUBLIC_SUFFIX_LIST_URLS[0], status=208, body="uk.com")
 
     cache_dir = str(tmpdir)
     extract = TLDExtract(cache_dir=cache_dir)
diff --git a/tox.ini b/tox.ini
index 47011557..fc979080 100644
--- a/tox.ini
+++ b/tox.ini
@@ -27,4 +27,5 @@ commands = pytest --pylint -m pylint {posargs}
 [pycodestyle]
 # E203 - whitespace before; disagrees with PEP8 https://github.com/psf/black/issues/354#issuecomment-397684838
 # E501 - line too long
-ignore = E203,E501
+# W503 - line break before binary operator; disagrees with PEP8 https://github.com/psf/black/issues/52
+ignore = E203, E501, W503
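
Note for reviewers: judging by the quote normalization, the magic trailing
commas, and the psf/black issue links added to tox.ini, this diff appears to
be the output of the Black formatter (the regrouped import in
tests/test_cache.py looks more like isort's work). A formatting-only commit
should leave each module's AST unchanged; Black checks exactly that in its
default safe mode. Below is a minimal standalone sketch of the same check.
The script name and file paths are hypothetical; point it at a before and an
after copy of any Python file touched here.

import ast
import sys


def same_ast(before_path: str, after_path: str) -> bool:
    """Return True if two Python source files parse to identical ASTs."""
    with open(before_path, encoding="utf-8") as f:
        before = ast.parse(f.read())
    with open(after_path, encoding="utf-8") as f:
        after = ast.parse(f.read())
    # ast.dump() normalizes away quote style, line wrapping, and trailing
    # commas, so equal dumps mean the reformat changed no runtime behavior.
    return ast.dump(before) == ast.dump(after)


if __name__ == "__main__":
    print(same_ast(sys.argv[1], sys.argv[2]))

Example run: python same_ast.py old/tests/main_test.py new/tests/main_test.py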