Mock Version: 3.0
Mock Version: 3.0
Mock Version: 3.0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-453126-21993/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1674172800
Wrote: /builddir/build/SRPMS/python-dns-2.3.0-2.fc38.src.rpm
Child return code was: 0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-453126-21993/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1674172800
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.A8eVNL
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf dnspython-2.3.0
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/dnspython-2.3.0.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd dnspython-2.3.0
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
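The %generate_buildrequires pass that this rpmbuild -br run leads into (and which repeats twice more below, once mock has installed what each pass reported) prints a "Handling ... / Requirement satisfied / Requirement not satisfied" line for every extras-qualified dependency, and the "Child return code was: 11" after each pass is rpmbuild's way of signalling that dynamic BuildRequires were generated for mock to install, not a failure. A rough sketch of that satisfied/not-satisfied check, assuming only the stock packaging and importlib.metadata APIs; the real /usr/lib/rpm/redhat/pyproject_buildrequires.py has considerably more logic (the -x extras selection, "alien requirement" handling, emitting the BuildRequires themselves):

# Rough sketch, not the Fedora generator itself: how a requirement line such as
#   cryptography (>=2.6,<40.0) ; extra == "dnssec"
# can be classified against the installed environment.
from importlib.metadata import version, PackageNotFoundError
from packaging.requirements import Requirement

def is_satisfied(req_line, extras):
    req = Requirement(req_line)
    # Evaluate the environment marker once per requested extra; if it never
    # applies, there is nothing to install for this line.
    if req.marker is not None and not any(
        req.marker.evaluate({"extra": e}) for e in (extras or {""})
    ):
        return True
    try:
        installed = version(req.name)
    except PackageNotFoundError:
        return False  # reported as "Requirement not satisfied"
    return req.specifier.contains(installed, prereleases=True)

print(is_satisfied('cryptography (>=2.6,<40.0) ; extra == "dnssec"', {"dnssec"}))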
+ find examples -type f + xargs chmod a-x + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.NMxppv + umask 022 + cd /builddir/build/BUILD + cd dnspython-2.3.0 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(toml) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/.pyproject-builddir + RPM_TOXENV=py311 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/pyproject-wheeldir -r -x dnssec -x idna -x trio -x curio -x doh Handling poetry-core from build-system.requires Requirement not satisfied: poetry-core Exiting dependency generation pass: build backend + rm -rfv '*.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: 
/builddir/build/SRPMS/python-dns-2.3.0-2.fc38.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-453126-21993/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1674172800 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.SP9sXh + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf dnspython-2.3.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/dnspython-2.3.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd dnspython-2.3.0 + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . + find examples -type f + xargs chmod a-x + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.tmr45a + umask 022 + cd /builddir/build/BUILD + cd dnspython-2.3.0 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 
-Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(toml) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/.pyproject-builddir + RPM_TOXENV=py311 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/pyproject-wheeldir -r -x dnssec -x idna -x trio -x curio -x doh Handling poetry-core from build-system.requires Requirement satisfied: poetry-core (installed: poetry-core 1.4.0) Handling aioquic (>=0.9.20) ; extra == "doq" from hook generated metadata: Requires-Dist Ignoring alien requirement: aioquic (>=0.9.20) ; extra == "doq" Handling cryptography (>=2.6,<40.0) ; extra == "dnssec" from hook generated metadata: Requires-Dist Requirement not satisfied: cryptography (>=2.6,<40.0) ; extra == "dnssec" Handling curio (>=1.2,<2.0) ; extra == "curio" from hook generated metadata: Requires-Dist Requirement not satisfied: curio (>=1.2,<2.0) ; extra == "curio" Handling h2 (>=4.1.0) ; (python_full_version >= "3.6.2") and (extra == "doh") from hook generated metadata: Requires-Dist Requirement not satisfied: h2 (>=4.1.0) ; (python_full_version >= "3.6.2") and (extra == "doh") Handling httpx (>=0.21.1) ; (python_full_version >= "3.6.2") and (extra == "doh") from hook generated metadata: Requires-Dist Requirement not satisfied: httpx (>=0.21.1) ; (python_full_version >= "3.6.2") and (extra == "doh") Handling idna (>=2.1,<4.0) ; extra == "idna" from hook generated metadata: Requires-Dist Requirement not satisfied: idna (>=2.1,<4.0) ; extra == "idna" Handling requests (>=2.23.0,<3.0.0) ; extra == "doh" from hook generated metadata: Requires-Dist Requirement not satisfied: requests (>=2.23.0,<3.0.0) ; extra == "doh" Handling requests-toolbelt (>=0.9.1,<0.11.0) ; extra == "doh" from hook generated metadata: Requires-Dist Requirement not satisfied: requests-toolbelt (>=0.9.1,<0.11.0) ; extra == "doh" Handling sniffio (>=1.1,<2.0) ; extra == "curio" from hook generated metadata: Requires-Dist Requirement not satisfied: sniffio (>=1.1,<2.0) ; extra == "curio" Handling trio (>=0.14,<0.23) ; extra == "trio" from hook generated metadata: Requires-Dist Requirement not satisfied: trio (>=0.14,<0.23) ; extra == "trio" Handling wmi (>=1.5.1,<2.0.0) ; extra == "wmi" from hook generated metadata: Requires-Dist Ignoring alien requirement: wmi (>=1.5.1,<2.0.0) ; extra == "wmi" + rm -rfv dnspython-2.3.0.dist-info/ removed 'dnspython-2.3.0.dist-info/METADATA' removed 'dnspython-2.3.0.dist-info/WHEEL' removed 'dnspython-2.3.0.dist-info/LICENSE' removed directory 'dnspython-2.3.0.dist-info/' + RPM_EC=0 ++ jobs -p + 
exit 0 Wrote: /builddir/build/SRPMS/python-dns-2.3.0-2.fc38.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-453126-21993/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1674172800 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.n4pjUT + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf dnspython-2.3.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/dnspython-2.3.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd dnspython-2.3.0 + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . + find examples -type f + xargs chmod a-x + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.um5tKi + umask 022 + cd /builddir/build/BUILD + cd dnspython-2.3.0 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld 
-specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(toml) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/.pyproject-builddir + RPM_TOXENV=py311 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/pyproject-wheeldir -r -x dnssec -x idna -x trio -x curio -x doh Handling poetry-core from build-system.requires Requirement satisfied: poetry-core (installed: poetry-core 1.4.0) Handling aioquic (>=0.9.20) ; extra == "doq" from hook generated metadata: Requires-Dist Ignoring alien requirement: aioquic (>=0.9.20) ; extra == "doq" Handling cryptography (>=2.6,<40.0) ; extra == "dnssec" from hook generated metadata: Requires-Dist Requirement satisfied: cryptography (>=2.6,<40.0) ; extra == "dnssec" (installed: cryptography 37.0.2) Handling curio (>=1.2,<2.0) ; extra == "curio" from hook generated metadata: Requires-Dist Requirement satisfied: curio (>=1.2,<2.0) ; extra == "curio" (installed: curio 1.6) Handling h2 (>=4.1.0) ; (python_full_version >= "3.6.2") and (extra == "doh") from hook generated metadata: Requires-Dist Requirement satisfied: h2 (>=4.1.0) ; (python_full_version >= "3.6.2") and (extra == "doh") (installed: h2 4.1.0) Handling httpx (>=0.21.1) ; (python_full_version >= "3.6.2") and (extra == "doh") from hook generated metadata: Requires-Dist Requirement satisfied: httpx (>=0.21.1) ; (python_full_version >= "3.6.2") and (extra == "doh") (installed: httpx 0.23.0) Handling idna (>=2.1,<4.0) ; extra == "idna" from hook generated metadata: Requires-Dist Requirement satisfied: idna (>=2.1,<4.0) ; extra == "idna" (installed: idna 3.4) Handling requests (>=2.23.0,<3.0.0) ; extra == "doh" from hook generated metadata: Requires-Dist Requirement satisfied: requests (>=2.23.0,<3.0.0) ; extra == "doh" (installed: requests 2.28.2) Handling requests-toolbelt (>=0.9.1,<0.11.0) ; extra == "doh" from hook generated metadata: Requires-Dist Requirement satisfied: requests-toolbelt (>=0.9.1,<0.11.0) ; extra == "doh" (installed: requests-toolbelt 0.9.1) Handling sniffio (>=1.1,<2.0) ; extra == "curio" from hook generated metadata: Requires-Dist Requirement satisfied: sniffio (>=1.1,<2.0) ; extra == "curio" (installed: sniffio 1.2.0) Handling trio (>=0.14,<0.23) ; extra == "trio" from hook generated metadata: Requires-Dist Requirement satisfied: trio (>=0.14,<0.23) ; extra == "trio" (installed: trio 0.22.0) Handling wmi (>=1.5.1,<2.0.0) ; extra == "wmi" from hook generated metadata: Requires-Dist Ignoring alien requirement: wmi (>=1.5.1,<2.0.0) ; 
extra == "wmi" + rm -rfv dnspython-2.3.0.dist-info/ removed 'dnspython-2.3.0.dist-info/METADATA' removed 'dnspython-2.3.0.dist-info/WHEEL' removed 'dnspython-2.3.0.dist-info/LICENSE' removed directory 'dnspython-2.3.0.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-dns-2.3.0-2.fc38.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-453126-21993/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --target noarch --nodeps /builddir/build/SPECS/python-dns.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1674172800 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.636Egw + umask 022 + cd /builddir/build/BUILD + cd dnspython-2.3.0 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + 
export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(toml) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/.pyproject-builddir + RPM_TOXENV=py311 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/pyproject-wheeldir -r -x dnssec -x idna -x trio -x curio -x doh Handling poetry-core from build-system.requires Requirement satisfied: poetry-core (installed: poetry-core 1.4.0) Handling aioquic (>=0.9.20) ; extra == "doq" from hook generated metadata: Requires-Dist Ignoring alien requirement: aioquic (>=0.9.20) ; extra == "doq" Handling cryptography (>=2.6,<40.0) ; extra == "dnssec" from hook generated metadata: Requires-Dist Requirement satisfied: cryptography (>=2.6,<40.0) ; extra == "dnssec" (installed: cryptography 37.0.2) Handling curio (>=1.2,<2.0) ; extra == "curio" from hook generated metadata: Requires-Dist Requirement satisfied: curio (>=1.2,<2.0) ; extra == "curio" (installed: curio 1.6) Handling h2 (>=4.1.0) ; (python_full_version >= "3.6.2") and (extra == "doh") from hook generated metadata: Requires-Dist Requirement satisfied: h2 (>=4.1.0) ; (python_full_version >= "3.6.2") and (extra == "doh") (installed: h2 4.1.0) Handling httpx (>=0.21.1) ; (python_full_version >= "3.6.2") and (extra == "doh") from hook generated metadata: Requires-Dist Requirement satisfied: httpx (>=0.21.1) ; (python_full_version >= "3.6.2") and (extra == "doh") (installed: httpx 0.23.0) Handling idna (>=2.1,<4.0) ; extra == "idna" from hook generated metadata: Requires-Dist Requirement satisfied: idna (>=2.1,<4.0) ; extra == "idna" (installed: idna 3.4) Handling requests (>=2.23.0,<3.0.0) ; extra == "doh" from hook generated metadata: Requires-Dist Requirement satisfied: requests (>=2.23.0,<3.0.0) ; extra == "doh" (installed: requests 2.28.2) Handling requests-toolbelt (>=0.9.1,<0.11.0) ; extra == "doh" from hook generated metadata: Requires-Dist Requirement satisfied: requests-toolbelt (>=0.9.1,<0.11.0) ; extra == "doh" (installed: requests-toolbelt 0.9.1) Handling sniffio (>=1.1,<2.0) ; extra == "curio" from hook generated metadata: Requires-Dist Requirement satisfied: sniffio (>=1.1,<2.0) ; extra == "curio" (installed: sniffio 1.2.0) Handling trio (>=0.14,<0.23) ; extra == "trio" from hook generated metadata: Requires-Dist Requirement satisfied: trio (>=0.14,<0.23) ; extra == "trio" (installed: trio 0.22.0) Handling wmi (>=1.5.1,<2.0.0) ; extra == "wmi" from hook generated metadata: Requires-Dist Ignoring alien requirement: wmi (>=1.5.1,<2.0.0) ; extra == "wmi" + rm -rfv dnspython-2.3.0.dist-info/ removed 'dnspython-2.3.0.dist-info/METADATA' removed 'dnspython-2.3.0.dist-info/WHEEL' removed 
'dnspython-2.3.0.dist-info/LICENSE' removed directory 'dnspython-2.3.0.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.f2tZii + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd dnspython-2.3.0 + mkdir -p /builddir/build/BUILD/dnspython-2.3.0/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/dnspython-2.3.0/.pyproject-builddir + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_wheel.py /builddir/build/BUILD/dnspython-2.3.0/pyproject-wheeldir Processing /builddir/build/BUILD/dnspython-2.3.0 Preparing metadata (pyproject.toml): started Running command Preparing metadata (pyproject.toml) Preparing metadata (pyproject.toml): finished with status 'done' Building wheels for collected packages: dnspython Building wheel for dnspython (pyproject.toml): started Running command Building wheel for dnspython (pyproject.toml) Building wheel for dnspython (pyproject.toml): finished with status 'done' Created wheel for dnspython: filename=dnspython-2.3.0-py3-none-any.whl size=283678 
sha256=1e9eb1b56c1377b04c286ef6e42ba5b0e835f53f1a7f92bd2e62c742e20e9a06 Stored in directory: /builddir/.cache/pip/wheels/74/9c/50/2ee2ddca7a573327f84eb870edd6be68ba9929701696b806c8 Successfully built dnspython + RPM_EC=0 ++ jobs -p + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.aayPa6 + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch '!=' / ']' + rm -rf /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch ++ dirname /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch + mkdir -p /builddir/build/BUILDROOT + mkdir /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd dnspython-2.3.0 ++ xargs basename --multiple ++ ls /builddir/build/BUILD/dnspython-2.3.0/pyproject-wheeldir/dnspython-2.3.0-py3-none-any.whl ++ sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/' + specifier=dnspython==2.3.0 + TMPDIR=/builddir/build/BUILD/dnspython-2.3.0/.pyproject-builddir + /usr/bin/python3 -m pip install --root /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch --prefix /usr --no-deps --disable-pip-version-check --progress-bar off --verbose --ignore-installed --no-warn-script-location --no-index --no-cache-dir --find-links /builddir/build/BUILD/dnspython-2.3.0/pyproject-wheeldir dnspython==2.3.0 Using pip 22.3.1 from /usr/lib/python3.11/site-packages/pip (python 3.11) Looking in links: /builddir/build/BUILD/dnspython-2.3.0/pyproject-wheeldir Processing ./pyproject-wheeldir/dnspython-2.3.0-py3-none-any.whl Installing collected packages: dnspython Successfully installed dnspython-2.3.0 + '[' -d 
/builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/bin ']' + rm -f /builddir/build/BUILD/python-dns-2.3.0-2.fc38.noarch-pyproject-ghost-distinfo + site_dirs=() + '[' -d /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11/site-packages ']' + site_dirs+=("/usr/lib/python3.11/site-packages") + '[' /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib64/python3.11/site-packages '!=' /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11/site-packages ']' + '[' -d /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib64/python3.11/site-packages ']' + for site_dir in ${site_dirs[@]} + for distinfo in /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch$site_dir/*.dist-info + echo '%ghost /usr/lib/python3.11/site-packages/dnspython-2.3.0.dist-info' + sed -i s/pip/rpm/ /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11/site-packages/dnspython-2.3.0.dist-info/INSTALLER + PYTHONPATH=/usr/lib/rpm/redhat + /usr/bin/python3 -B /usr/lib/rpm/redhat/pyproject_preprocess_record.py --buildroot /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch --record /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11/site-packages/dnspython-2.3.0.dist-info/RECORD --output /builddir/build/BUILD/python-dns-2.3.0-2.fc38.noarch-pyproject-record + rm -fv /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11/site-packages/dnspython-2.3.0.dist-info/RECORD removed '/builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11/site-packages/dnspython-2.3.0.dist-info/RECORD' + rm -fv /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11/site-packages/dnspython-2.3.0.dist-info/REQUESTED removed '/builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11/site-packages/dnspython-2.3.0.dist-info/REQUESTED' ++ wc -l /builddir/build/BUILD/python-dns-2.3.0-2.fc38.noarch-pyproject-ghost-distinfo ++ cut -f1 '-d ' + lines=1 + '[' 1 -ne 1 ']' + /usr/bin/python3 /usr/lib/rpm/redhat/pyproject_save_files.py --output-files /builddir/build/BUILD/python-dns-2.3.0-2.fc38.noarch-pyproject-files --output-modules /builddir/build/BUILD/python-dns-2.3.0-2.fc38.noarch-pyproject-modules --buildroot /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch --sitelib /usr/lib/python3.11/site-packages --sitearch /usr/lib64/python3.11/site-packages --python-version 3.11 --pyproject-record /builddir/build/BUILD/python-dns-2.3.0-2.fc38.noarch-pyproject-record --prefix /usr dns + /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 2.3.0-2.fc38 --unique-debug-suffix -2.3.0-2.fc38.noarch --unique-debug-src-base python-dns-2.3.0-2.fc38.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/dnspython-2.3.0 find: 'debug': No such file or directory + /usr/lib/rpm/check-buildroot + /usr/lib/rpm/redhat/brp-ldconfig + /usr/lib/rpm/brp-compress + /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip + /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip + /usr/lib/rpm/check-rpaths + /usr/lib/rpm/redhat/brp-mangle-shebangs + /usr/lib/rpm/brp-remove-la-files + env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8 Bytecompiling .py files below /builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11 using python3.11 + /usr/lib/rpm/redhat/brp-python-hardlink Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.nPrBvy + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 
-flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd dnspython-2.3.0 + export OPENSSL_ENABLE_SHA1_SIGNATURES=yes + OPENSSL_ENABLE_SHA1_SIGNATURES=yes + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + PATH=/builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin + PYTHONPATH=/builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib64/python3.11/site-packages:/builddir/build/BUILDROOT/python-dns-2.3.0-2.fc38.noarch/usr/lib/python3.11/site-packages + PYTHONDONTWRITEBYTECODE=1 + PYTEST_ADDOPTS=' --ignore=/builddir/build/BUILD/dnspython-2.3.0/.pyproject-builddir' + PYTEST_XDIST_AUTO_NUM_WORKERS=8 + /usr/bin/pytest ============================= test session starts ============================== platform linux -- Python 3.11.2, pytest-7.2.2, pluggy-1.0.0 rootdir: /builddir/build/BUILD/dnspython-2.3.0, configfile: pytest.ini plugins: anyio-3.5.0 collected 1323 items tests/test_address.py .... [ 0%] tests/test_async.py .......FFFFFF..FF....FFF...FFFF.....FFFFFF..FF....FF [ 4%] F...FFFF....FFFsss..FF....FFs...FFFF [ 6%] tests/test_bugs.py ....... 
[ 7%]
tests/test_constants.py ..... [ 7%]
tests/test_dnssec.py ................................................... [ 11%]
......... [ 12%]
tests/test_doh.py .FFFFF.FFFFFFF [ 13%]
tests/test_doq.py sss [ 13%]
tests/test_edns.py .............. [ 14%]
tests/test_entropy.py ..... [ 15%]
tests/test_exceptions.py ..... [ 15%]
tests/test_flags.py ............ [ 16%]
tests/test_generate.py ................. [ 17%]
tests/test_grange.py .......... [ 18%]
tests/test_immutable.py ...... [ 18%]
tests/test_message.py .................................................. [ 22%]
............... [ 23%]
tests/test_name.py ..................................................... [ 27%]
........................................................................ [ 33%]
............................................... [ 36%]
tests/test_namedict.py ...................... [ 38%]
tests/test_nsec3.py .. [ 38%]
tests/test_nsec3_hash.py ... [ 38%]
tests/test_ntoaaton.py ............................................. [ 42%]
tests/test_processing_order.py .......... [ 43%]
tests/test_query.py ..FFF............................... [ 45%]
tests/test_rdata.py .................................................... [ 49%]
...................................... [ 52%]
tests/test_rdataset.py .................. [ 53%]
tests/test_rdtypeandclass.py .......................... [ 55%]
tests/test_rdtypeanydnskey.py .. [ 56%]
tests/test_rdtypeanyeui.py ........................ [ 57%]
tests/test_rdtypeanyloc.py .... [ 58%]
tests/test_rdtypeanytkey.py .... [ 58%]
tests/test_renderer.py ..... [ 58%]
tests/test_resolution.py .............................. [ 61%]
tests/test_resolver.py ..........................FFF.FF.FFFFFFFFFFFF.FF. [ 64%]
FFFFFFFFFFFF.FF.FFFFFFFFF............... [ 67%]
tests/test_resolver_override.py F...F........F [ 68%]
tests/test_rrset.py ...................... [ 70%]
tests/test_rrset_reader.py ............ [ 71%]
tests/test_serial.py ........... [ 72%]
tests/test_set.py ................................................... [ 76%]
tests/test_svcb.py ................ [ 77%]
tests/test_tokenizer.py ................................................ [ 80%]
[ 80%]
tests/test_transaction.py ............................................. [ 84%]
tests/test_tsig.py .................... [ 85%]
tests/test_tsigkeyring.py ....... [ 86%]
tests/test_ttl.py ....... [ 86%]
tests/test_update.py ....................... [ 88%]
tests/test_wire.py ....... [ 89%]
tests/test_xfr.py ................................. [ 91%]
tests/test_zone.py ..................................................... [ 95%]
.............................................. [ 99%]
tests/test_zonedigest.py ........... [100%]
=================================== FAILURES ===================================
______________________ AsyncTests.testCanonicalNameCNAME _______________________
self =
f = '/etc/resolv.conf'
    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.
        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameCNAME(self): name = dns.name.from_text("www.dnspython.org") cname = dns.name.from_text("dmfrjf4ips8xa.cloudfront.net") async def run(): return await dns.asyncresolver.canonical_name(name) > self.assertEqual(self.async_run(run), cname) tests/test_async.py:205: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:203: in run return await dns.asyncresolver.canonical_name(name) dns/asyncresolver.py:256: in canonical_name return await get_default_resolver().canonical_name(name) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _____________________ AsyncTests.testCanonicalNameDangling _____________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = @unittest.skipIf(_systemd_resolved_present, "systemd-resolved in use") def testCanonicalNameDangling(self): name = dns.name.from_text("dangling-cname.dnspython.org") cname = dns.name.from_text("dangling-target.dnspython.org") async def run(): return await dns.asyncresolver.canonical_name(name) > self.assertEqual(self.async_run(run), cname) tests/test_async.py:215: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:213: in run return await dns.asyncresolver.canonical_name(name) dns/asyncresolver.py:256: in canonical_name return await get_default_resolver().canonical_name(name) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _____________________ AsyncTests.testCanonicalNameNoCNAME ______________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameNoCNAME(self): cname = dns.name.from_text("www.google.com") async def run(): return await dns.asyncresolver.canonical_name("www.google.com") > self.assertEqual(self.async_run(run), cname) tests/test_async.py:196: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:194: in run return await dns.asyncresolver.canonical_name("www.google.com") dns/asyncresolver.py:256: in canonical_name return await get_default_resolver().canonical_name(name) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _________________________ AsyncTests.testDOHGetRequest _________________________ remote_host = 'dns.google', remote_port = 443 async def connect_tcp( remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = None, tls: bool = False, ssl_context: Optional[ssl.SSLContext] = None, tls_standard_compatible: bool = True, tls_hostname: Optional[str] = None, happy_eyeballs_delay: float = 0.25 ) -> Union[SocketStream, TLSStream]: """ Connect to a host using the TCP protocol. This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555). If ``address`` is a host name that resolves to multiple IP addresses, each one is tried until one connection attempt succeeds. If the first attempt does not connected within 250 milliseconds, a second attempt is started using the next address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if available) is tried first. When the connection has been established, a TLS handshake will be done if either ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``. 
:param remote_host: the IP address or host name to connect to :param remote_port: port on the target host to connect to :param local_host: the interface address or name to bind the socket to before connecting :param tls: ``True`` to do a TLS handshake with the connected stream and return a :class:`~anyio.streams.tls.TLSStream` instead :param ssl_context: the SSL context object to use (if omitted, a default context is created) :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing the stream and requires that the server does this as well. Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream. Some protocols, such as HTTP, require this option to be ``False``. See :meth:`~ssl.SSLContext.wrap_socket` for details. :param tls_hostname: host name to check the server certificate against (defaults to the value of ``remote_host``) :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream :raises OSError: if the connection attempt fails """ # Placed here due to https://github.com/python/mypy/issues/7057 connected_stream: Optional[SocketStream] = None async def try_connect(remote_host: str, event: Event) -> None: nonlocal connected_stream try: stream = await asynclib.connect_tcp(remote_host, remote_port, local_address) except OSError as exc: oserrors.append(exc) return else: if connected_stream is None: connected_stream = stream tg.cancel_scope.cancel() else: await stream.aclose() finally: event.set() asynclib = get_asynclib() local_address: Optional[IPSockAddrType] = None family = socket.AF_UNSPEC if local_host: gai_res = await getaddrinfo(str(local_host), None) family, *_, local_address = gai_res[0] target_host = str(remote_host) try: > addr_obj = ip_address(remote_host) /usr/lib/python3.11/site-packages/anyio/_core/_sockets.py:149: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = 'dns.google' def ip_address(address): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Address or IPv6Address object. 
Raises: ValueError: if the *address* passed isn't either a v4 or a v6 address """ try: return IPv4Address(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Address(address) except (AddressValueError, NetmaskValueError): pass > raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 address') E ValueError: 'dns.google' does not appear to be an IPv4 or IPv6 address /usr/lib64/python3.11/ipaddress.py:54: ValueError During handling of the above exception, another exception occurred: map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: exc_map = { TimeoutError: ConnectTimeout, OSError: ConnectError, anyio.BrokenResourceError: ConnectError, } with map_exceptions(exc_map): with anyio.fail_after(timeout): > stream: anyio.abc.ByteStream = await anyio.connect_tcp( remote_host=host, remote_port=port, local_host=local_address, ) /usr/lib/python3.11/site-packages/httpcore/backends/asyncio.py:109: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ remote_host = 'dns.google', remote_port = 443 async def connect_tcp( remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = None, tls: bool = False, ssl_context: Optional[ssl.SSLContext] = None, tls_standard_compatible: bool = True, tls_hostname: Optional[str] = None, happy_eyeballs_delay: float = 0.25 ) -> Union[SocketStream, TLSStream]: """ Connect to a host using the TCP protocol. This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555). If ``address`` is a host name that resolves to multiple IP addresses, each one is tried until one connection attempt succeeds. If the first attempt does not connected within 250 milliseconds, a second attempt is started using the next address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if available) is tried first. When the connection has been established, a TLS handshake will be done if either ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``. :param remote_host: the IP address or host name to connect to :param remote_port: port on the target host to connect to :param local_host: the interface address or name to bind the socket to before connecting :param tls: ``True`` to do a TLS handshake with the connected stream and return a :class:`~anyio.streams.tls.TLSStream` instead :param ssl_context: the SSL context object to use (if omitted, a default context is created) :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing the stream and requires that the server does this as well. Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream. Some protocols, such as HTTP, require this option to be ``False``. See :meth:`~ssl.SSLContext.wrap_socket` for details. 
:param tls_hostname: host name to check the server certificate against (defaults to the value of ``remote_host``) :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream :raises OSError: if the connection attempt fails """ # Placed here due to https://github.com/python/mypy/issues/7057 connected_stream: Optional[SocketStream] = None async def try_connect(remote_host: str, event: Event) -> None: nonlocal connected_stream try: stream = await asynclib.connect_tcp(remote_host, remote_port, local_address) except OSError as exc: oserrors.append(exc) return else: if connected_stream is None: connected_stream = stream tg.cancel_scope.cancel() else: await stream.aclose() finally: event.set() asynclib = get_asynclib() local_address: Optional[IPSockAddrType] = None family = socket.AF_UNSPEC if local_host: gai_res = await getaddrinfo(str(local_host), None) family, *_, local_address = gai_res[0] target_host = str(remote_host) try: addr_obj = ip_address(remote_host) except ValueError: # getaddrinfo() will raise an exception if name resolution fails > gai_res = await getaddrinfo(target_host, remote_port, family=family, type=socket.SOCK_STREAM) /usr/lib/python3.11/site-packages/anyio/_core/_sockets.py:152: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = None def run(self): if not self.future.set_running_or_notify_cancel(): return try: > result = self.fn(*self.args, **self.kwargs) /usr/lib64/python3.11/concurrent/futures/thread.py:58: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = b'dns.google', port = 443, family = type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. 
addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request( self, request: Request, ) -> Response: assert isinstance(request.stream, AsyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = await self._pool.handle_async_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:353: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: response = await connection.handle_async_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. async with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() await self._attempt_to_acquire_connection(status) except BaseException as exc: await self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." 
) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: > response = await connection.handle_async_request(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: stream = await self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import AsyncHTTP2Connection self._connection = AsyncHTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = AsyncHTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: > stream = await self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def _connect(self, request: Request) -> AsyncNetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": self._local_address, "timeout": timeout, } async with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = await self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: Optional[float] = None, local_address: Optional[str] = None, ) -> AsyncNetworkStream: await self._init_backend() > return await self._backend.connect_tcp( host, port, timeout=timeout, local_address=local_address ) 
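The httpcore and httpx frames in this traceback share one idiom: a context manager (map_exceptions / map_httpcore_exceptions) catches low-level errors such as OSError and re-raises them as transport-level errors like ConnectError, so callers only handle one exception family. A self-contained sketch of that pattern, using a stand-in exception class rather than httpcore's own:

import contextlib
from typing import Dict, Iterator, Type

class ConnectError(Exception):
    """Stand-in for httpcore.ConnectError in this sketch."""

@contextlib.contextmanager
def map_exceptions(mapping: Dict[Type[Exception], Type[Exception]]) -> Iterator[None]:
    try:
        yield
    except Exception as exc:
        for from_exc, to_exc in mapping.items():
            if isinstance(exc, from_exc):
                raise to_exc(exc) from exc
        raise  # anything without a mapping propagates unchanged

try:
    with map_exceptions({OSError: ConnectError}):
        raise OSError(-3, "Temporary failure in name resolution")
except ConnectError as exc:
    print(type(exc).__name__, "->", exc)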
/usr/lib/python3.11/site-packages/httpcore/backends/auto.py:29: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: exc_map = { TimeoutError: ConnectTimeout, OSError: ConnectError, anyio.BrokenResourceError: ConnectError, } > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/asyncio.py:107: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = @unittest.skipIf(not dns.query._have_httpx, "httpx not available") def testDOHGetRequest(self): if self.backend.name() == "curio": self.skipTest("anyio dropped curio support") async def run(): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) r = await dns.asyncquery.https(q, nameserver_url, post=False, timeout=4) self.assertTrue(q.is_response(r)) > self.async_run(run) tests/test_async.py:468: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:465: in run r = await dns.asyncquery.https(q, nameserver_url, post=False, timeout=4) dns/asyncquery.py:554: in https response = await the_client.get( /usr/lib/python3.11/site-packages/httpx/_client.py:1751: in get return await self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:1527: in request return await self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:1614: in send response = await self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:1642: in _send_handling_auth response = await self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:1679: in _send_handling_redirects response = await self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1716: in _send_single_request response = await transport.handle_async_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:352: in 
handle_async_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError ______________________ AsyncTests.testDOHGetRequestHttp1 _______________________ remote_host = 'dns.google', remote_port = 443 async def connect_tcp( remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = None, tls: bool = False, ssl_context: Optional[ssl.SSLContext] = None, tls_standard_compatible: bool = True, tls_hostname: Optional[str] = None, happy_eyeballs_delay: float = 0.25 ) -> Union[SocketStream, TLSStream]: """ Connect to a host using the TCP protocol. This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555). If ``address`` is a host name that resolves to multiple IP addresses, each one is tried until one connection attempt succeeds. If the first attempt does not connected within 250 milliseconds, a second attempt is started using the next address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if available) is tried first. When the connection has been established, a TLS handshake will be done if either ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``. :param remote_host: the IP address or host name to connect to :param remote_port: port on the target host to connect to :param local_host: the interface address or name to bind the socket to before connecting :param tls: ``True`` to do a TLS handshake with the connected stream and return a :class:`~anyio.streams.tls.TLSStream` instead :param ssl_context: the SSL context object to use (if omitted, a default context is created) :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing the stream and requires that the server does this as well. Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream. Some protocols, such as HTTP, require this option to be ``False``. See :meth:`~ssl.SSLContext.wrap_socket` for details. 
:param tls_hostname: host name to check the server certificate against (defaults to the value of ``remote_host``) :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream :raises OSError: if the connection attempt fails """ # Placed here due to https://github.com/python/mypy/issues/7057 connected_stream: Optional[SocketStream] = None async def try_connect(remote_host: str, event: Event) -> None: nonlocal connected_stream try: stream = await asynclib.connect_tcp(remote_host, remote_port, local_address) except OSError as exc: oserrors.append(exc) return else: if connected_stream is None: connected_stream = stream tg.cancel_scope.cancel() else: await stream.aclose() finally: event.set() asynclib = get_asynclib() local_address: Optional[IPSockAddrType] = None family = socket.AF_UNSPEC if local_host: gai_res = await getaddrinfo(str(local_host), None) family, *_, local_address = gai_res[0] target_host = str(remote_host) try: > addr_obj = ip_address(remote_host) /usr/lib/python3.11/site-packages/anyio/_core/_sockets.py:149: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = 'dns.google' def ip_address(address): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Address or IPv6Address object. Raises: ValueError: if the *address* passed isn't either a v4 or a v6 address """ try: return IPv4Address(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Address(address) except (AddressValueError, NetmaskValueError): pass > raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 address') E ValueError: 'dns.google' does not appear to be an IPv4 or IPv6 address /usr/lib64/python3.11/ipaddress.py:54: ValueError During handling of the above exception, another exception occurred: map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: exc_map = { TimeoutError: ConnectTimeout, OSError: ConnectError, anyio.BrokenResourceError: ConnectError, } with map_exceptions(exc_map): with anyio.fail_after(timeout): > stream: anyio.abc.ByteStream = await anyio.connect_tcp( remote_host=host, remote_port=port, local_host=local_address, ) /usr/lib/python3.11/site-packages/httpcore/backends/asyncio.py:109: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ remote_host = 'dns.google', remote_port = 443 async def connect_tcp( remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = None, tls: bool = False, ssl_context: Optional[ssl.SSLContext] = None, tls_standard_compatible: bool = True, tls_hostname: Optional[str] = None, happy_eyeballs_delay: float = 0.25 ) -> Union[SocketStream, TLSStream]: """ Connect to a host using the TCP protocol. This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555). 
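For context, the anyio.connect_tcp() function quoted here is the Happy Eyeballs entry point the DoH tests go through. A minimal sketch of calling it directly (it needs outbound network; in this chroot the same call fails with the name-resolution OSError shown below):

import anyio

async def main() -> None:
    # connect_tcp() resolves the host name, then tries the returned addresses
    # in turn, starting the next attempt after happy_eyeballs_delay seconds
    # and keeping whichever stream connects first.
    stream = await anyio.connect_tcp("dns.google", 443, happy_eyeballs_delay=0.25)
    await stream.aclose()

anyio.run(main)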
If ``address`` is a host name that resolves to multiple IP addresses, each one is tried until one connection attempt succeeds. If the first attempt does not connected within 250 milliseconds, a second attempt is started using the next address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if available) is tried first. When the connection has been established, a TLS handshake will be done if either ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``. :param remote_host: the IP address or host name to connect to :param remote_port: port on the target host to connect to :param local_host: the interface address or name to bind the socket to before connecting :param tls: ``True`` to do a TLS handshake with the connected stream and return a :class:`~anyio.streams.tls.TLSStream` instead :param ssl_context: the SSL context object to use (if omitted, a default context is created) :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing the stream and requires that the server does this as well. Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream. Some protocols, such as HTTP, require this option to be ``False``. See :meth:`~ssl.SSLContext.wrap_socket` for details. :param tls_hostname: host name to check the server certificate against (defaults to the value of ``remote_host``) :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream :raises OSError: if the connection attempt fails """ # Placed here due to https://github.com/python/mypy/issues/7057 connected_stream: Optional[SocketStream] = None async def try_connect(remote_host: str, event: Event) -> None: nonlocal connected_stream try: stream = await asynclib.connect_tcp(remote_host, remote_port, local_address) except OSError as exc: oserrors.append(exc) return else: if connected_stream is None: connected_stream = stream tg.cancel_scope.cancel() else: await stream.aclose() finally: event.set() asynclib = get_asynclib() local_address: Optional[IPSockAddrType] = None family = socket.AF_UNSPEC if local_host: gai_res = await getaddrinfo(str(local_host), None) family, *_, local_address = gai_res[0] target_host = str(remote_host) try: addr_obj = ip_address(remote_host) except ValueError: # getaddrinfo() will raise an exception if name resolution fails > gai_res = await getaddrinfo(target_host, remote_port, family=family, type=socket.SOCK_STREAM) /usr/lib/python3.11/site-packages/anyio/_core/_sockets.py:152: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = None def run(self): if not self.future.set_running_or_notify_cancel(): return try: > result = self.fn(*self.args, **self.kwargs) /usr/lib64/python3.11/concurrent/futures/thread.py:58: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = b'dns.google', port = 443, family = type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. 
By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request( self, request: Request, ) -> Response: assert isinstance(request.stream, AsyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = await self._pool.handle_async_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:353: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: response = await connection.handle_async_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. async with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. 
status.unset_connection() await self._attempt_to_acquire_connection(status) except BaseException as exc: await self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: > response = await connection.handle_async_request(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: stream = await self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import AsyncHTTP2Connection self._connection = AsyncHTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = AsyncHTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: > stream = await self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def _connect(self, request: Request) -> AsyncNetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": 
self._local_address, "timeout": timeout, } async with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = await self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: Optional[float] = None, local_address: Optional[str] = None, ) -> AsyncNetworkStream: await self._init_backend() > return await self._backend.connect_tcp( host, port, timeout=timeout, local_address=local_address ) /usr/lib/python3.11/site-packages/httpcore/backends/auto.py:29: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: exc_map = { TimeoutError: ConnectTimeout, OSError: ConnectError, anyio.BrokenResourceError: ConnectError, } > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/asyncio.py:107: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = @unittest.skipIf(not dns.query._have_httpx, "httpx not available") def testDOHGetRequestHttp1(self): if self.backend.name() == "curio": self.skipTest("anyio dropped curio support") async def run(): saved_have_http2 = dns.query._have_http2 try: dns.query._have_http2 = False nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) r = await dns.asyncquery.https(q, nameserver_url, post=False, timeout=4) self.assertTrue(q.is_response(r)) finally: dns.query._have_http2 = saved_have_http2 > self.async_run(run) tests/test_async.py:486: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:481: in run r = await dns.asyncquery.https(q, nameserver_url, post=False, timeout=4) dns/asyncquery.py:554: in https response = await the_client.get( 
/usr/lib/python3.11/site-packages/httpx/_client.py:1751: in get return await self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:1527: in request return await self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:1614: in send response = await self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:1642: in _send_handling_auth response = await self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:1679: in _send_handling_redirects response = await self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1716: in _send_single_request response = await transport.handle_async_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:352: in handle_async_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError ________________________ AsyncTests.testDOHPostRequest _________________________ remote_host = 'cloudflare-dns.com', remote_port = 443 async def connect_tcp( remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = None, tls: bool = False, ssl_context: Optional[ssl.SSLContext] = None, tls_standard_compatible: bool = True, tls_hostname: Optional[str] = None, happy_eyeballs_delay: float = 0.25 ) -> Union[SocketStream, TLSStream]: """ Connect to a host using the TCP protocol. This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555). If ``address`` is a host name that resolves to multiple IP addresses, each one is tried until one connection attempt succeeds. If the first attempt does not connected within 250 milliseconds, a second attempt is started using the next address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if available) is tried first. When the connection has been established, a TLS handshake will be done if either ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``. :param remote_host: the IP address or host name to connect to :param remote_port: port on the target host to connect to :param local_host: the interface address or name to bind the socket to before connecting :param tls: ``True`` to do a TLS handshake with the connected stream and return a :class:`~anyio.streams.tls.TLSStream` instead :param ssl_context: the SSL context object to use (if omitted, a default context is created) :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing the stream and requires that the server does this as well. 
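For reference, the failing DoH tests reduce to a single dnspython call; a minimal sketch is below. The resolver URL is an illustrative assumption (the tests pick one at random from KNOWN_ANYCAST_DOH_RESOLVER_URLS), and the call only succeeds with outbound HTTPS access:

import asyncio

import dns.asyncquery
import dns.message
import dns.rdatatype

async def main() -> None:
    q = dns.message.make_query("example.com.", dns.rdatatype.A)
    # post=False sends the query as a GET request, as in testDOHGetRequest;
    # post=True exercises the POST path from testDOHPostRequest.
    r = await dns.asyncquery.https(q, "https://dns.google/dns-query",
                                   post=False, timeout=4)
    print(q.is_response(r))

asyncio.run(main())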
Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream. Some protocols, such as HTTP, require this option to be ``False``. See :meth:`~ssl.SSLContext.wrap_socket` for details. :param tls_hostname: host name to check the server certificate against (defaults to the value of ``remote_host``) :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream :raises OSError: if the connection attempt fails """ # Placed here due to https://github.com/python/mypy/issues/7057 connected_stream: Optional[SocketStream] = None async def try_connect(remote_host: str, event: Event) -> None: nonlocal connected_stream try: stream = await asynclib.connect_tcp(remote_host, remote_port, local_address) except OSError as exc: oserrors.append(exc) return else: if connected_stream is None: connected_stream = stream tg.cancel_scope.cancel() else: await stream.aclose() finally: event.set() asynclib = get_asynclib() local_address: Optional[IPSockAddrType] = None family = socket.AF_UNSPEC if local_host: gai_res = await getaddrinfo(str(local_host), None) family, *_, local_address = gai_res[0] target_host = str(remote_host) try: > addr_obj = ip_address(remote_host) /usr/lib/python3.11/site-packages/anyio/_core/_sockets.py:149: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = 'cloudflare-dns.com' def ip_address(address): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. Returns: An IPv4Address or IPv6Address object. Raises: ValueError: if the *address* passed isn't either a v4 or a v6 address """ try: return IPv4Address(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Address(address) except (AddressValueError, NetmaskValueError): pass > raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 address') E ValueError: 'cloudflare-dns.com' does not appear to be an IPv4 or IPv6 address /usr/lib64/python3.11/ipaddress.py:54: ValueError During handling of the above exception, another exception occurred: map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: exc_map = { TimeoutError: ConnectTimeout, OSError: ConnectError, anyio.BrokenResourceError: ConnectError, } with map_exceptions(exc_map): with anyio.fail_after(timeout): > stream: anyio.abc.ByteStream = await anyio.connect_tcp( remote_host=host, remote_port=port, local_host=local_address, ) /usr/lib/python3.11/site-packages/httpcore/backends/asyncio.py:109: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ remote_host = 'cloudflare-dns.com', remote_port = 443 async def connect_tcp( remote_host: IPAddressType, remote_port: int, *, local_host: Optional[IPAddressType] = None, tls: bool = False, ssl_context: Optional[ssl.SSLContext] = None, tls_standard_compatible: bool = True, tls_hostname: Optional[str] = None, 
happy_eyeballs_delay: float = 0.25 ) -> Union[SocketStream, TLSStream]: """ Connect to a host using the TCP protocol. This function implements the stateless version of the Happy Eyeballs algorithm (RFC 6555). If ``address`` is a host name that resolves to multiple IP addresses, each one is tried until one connection attempt succeeds. If the first attempt does not connected within 250 milliseconds, a second attempt is started using the next address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if available) is tried first. When the connection has been established, a TLS handshake will be done if either ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``. :param remote_host: the IP address or host name to connect to :param remote_port: port on the target host to connect to :param local_host: the interface address or name to bind the socket to before connecting :param tls: ``True`` to do a TLS handshake with the connected stream and return a :class:`~anyio.streams.tls.TLSStream` instead :param ssl_context: the SSL context object to use (if omitted, a default context is created) :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake before closing the stream and requires that the server does this as well. Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream. Some protocols, such as HTTP, require this option to be ``False``. See :meth:`~ssl.SSLContext.wrap_socket` for details. :param tls_hostname: host name to check the server certificate against (defaults to the value of ``remote_host``) :param happy_eyeballs_delay: delay (in seconds) before starting the next connection attempt :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream :raises OSError: if the connection attempt fails """ # Placed here due to https://github.com/python/mypy/issues/7057 connected_stream: Optional[SocketStream] = None async def try_connect(remote_host: str, event: Event) -> None: nonlocal connected_stream try: stream = await asynclib.connect_tcp(remote_host, remote_port, local_address) except OSError as exc: oserrors.append(exc) return else: if connected_stream is None: connected_stream = stream tg.cancel_scope.cancel() else: await stream.aclose() finally: event.set() asynclib = get_asynclib() local_address: Optional[IPSockAddrType] = None family = socket.AF_UNSPEC if local_host: gai_res = await getaddrinfo(str(local_host), None) family, *_, local_address = gai_res[0] target_host = str(remote_host) try: addr_obj = ip_address(remote_host) except ValueError: # getaddrinfo() will raise an exception if name resolution fails > gai_res = await getaddrinfo(target_host, remote_port, family=family, type=socket.SOCK_STREAM) /usr/lib/python3.11/site-packages/anyio/_core/_sockets.py:152: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = None def run(self): if not self.future.set_running_or_notify_cancel(): return try: > result = self.fn(*self.args, **self.kwargs) /usr/lib64/python3.11/concurrent/futures/thread.py:58: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = b'cloudflare-dns.com', port = 443, family = type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. 
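The getaddrinfo() frame quoted here is where each of these failures actually originates: with no resolver reachable inside the chroot, the lookup raises socket.gaierror (Errno -3). A small sketch of the equivalent call, with host and port taken from the log (it only returns addresses when a working resolver is available):

import socket

try:
    for family, type_, proto, canonname, sockaddr in socket.getaddrinfo(
        "dns.google", 443, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
    ):
        print(family, sockaddr)
except socket.gaierror as exc:
    # In the offline build chroot this prints the same
    # "Temporary failure in name resolution" error seen throughout this log.
    print("resolution failed:", exc)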
host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request( self, request: Request, ) -> Response: assert isinstance(request.stream, AsyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = await self._pool.handle_async_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:353: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: response = await connection.handle_async_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. async with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. 
status.unset_connection() await self._attempt_to_acquire_connection(status) except BaseException as exc: await self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: > response = await connection.handle_async_request(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: stream = await self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import AsyncHTTP2Connection self._connection = AsyncHTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = AsyncHTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: > stream = await self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def _connect(self, request: Request) -> AsyncNetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": 
self._local_address, "timeout": timeout, } async with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = await self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: Optional[float] = None, local_address: Optional[str] = None, ) -> AsyncNetworkStream: await self._init_backend() > return await self._backend.connect_tcp( host, port, timeout=timeout, local_address=local_address ) /usr/lib/python3.11/site-packages/httpcore/backends/auto.py:29: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: exc_map = { TimeoutError: ConnectTimeout, OSError: ConnectError, anyio.BrokenResourceError: ConnectError, } > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/asyncio.py:107: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = @unittest.skipIf(not dns.query._have_httpx, "httpx not available") def testDOHPostRequest(self): if self.backend.name() == "curio": self.skipTest("anyio dropped curio support") async def run(): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) r = await dns.asyncquery.https(q, nameserver_url, post=True, timeout=4) self.assertTrue(q.is_response(r)) > self.async_run(run) tests/test_async.py:499: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:496: in run r = await dns.asyncquery.https(q, nameserver_url, post=True, timeout=4) dns/asyncquery.py:548: in https response = await the_client.post( /usr/lib/python3.11/site-packages/httpx/_client.py:1842: in post return await self.request( 
/usr/lib/python3.11/site-packages/httpx/_client.py:1527: in request return await self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:1614: in send response = await self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:1642: in _send_handling_auth response = await self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:1679: in _send_handling_redirects response = await self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1716: in _send_single_request response = await transport.handle_async_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:352: in handle_async_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError ___________________________ AsyncTests.testQueryTLS ____________________________ fut = > timeout = 2 async def wait_for(fut, timeout): """Wait for the single Future or coroutine to complete, with timeout. Coroutine will be wrapped in Task. Returns result of the Future or coroutine. When a timeout occurs, it cancels the task and raises TimeoutError. To avoid the task cancellation, wrap it in shield(). If the wait is cancelled, the task is also cancelled. This function is a coroutine. """ loop = events.get_running_loop() if timeout is None: return await fut if timeout <= 0: fut = ensure_future(fut, loop=loop) if fut.done(): return fut.result() await _cancel_and_wait(fut, loop=loop) try: return fut.result() except exceptions.CancelledError as exc: raise exceptions.TimeoutError() from exc waiter = loop.create_future() timeout_handle = loop.call_later(timeout, _release_waiter, waiter) cb = functools.partial(_release_waiter, waiter) fut = ensure_future(fut, loop=loop) fut.add_done_callback(cb) try: # wait until the future completes or the timeout try: await waiter except exceptions.CancelledError: if fut.done(): return fut.result() else: fut.remove_done_callback(cb) # We must ensure that the task is not running # after wait_for() returns. # See https://bugs.python.org/issue32751 await _cancel_and_wait(fut, loop=loop) raise if fut.done(): return fut.result() else: fut.remove_done_callback(cb) # We must ensure that the task is not running # after wait_for() returns. 
# See https://bugs.python.org/issue32751 await _cancel_and_wait(fut, loop=loop) # In case task cancellation failed with some # exception, we should re-raise it # See https://bugs.python.org/issue40607 try: > return fut.result() /usr/lib64/python3.11/asyncio/tasks.py:490: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = '8.8.8.8', port = 853, limit = 65536 kwds = {'family': , 'local_addr': None, 'proto': 0, 'server_hostname': None, ...} loop = <_UnixSelectorEventLoop running=False closed=True debug=False> reader = async def open_connection(host=None, port=None, *, limit=_DEFAULT_LIMIT, **kwds): """A wrapper for create_connection() returning a (reader, writer) pair. The reader returned is a StreamReader instance; the writer is a StreamWriter instance. The arguments are all the usual arguments to create_connection() except protocol_factory; most common are positional host and port, with various optional keyword arguments following. Additional optional keyword arguments are loop (to set the event loop instance to use) and limit (to set the buffer limit passed to the StreamReader). (If you want to customize the StreamReader and/or StreamReaderProtocol classes, just copy the code -- there's really nothing special here except some convenience.) """ loop = events.get_running_loop() reader = StreamReader(limit=limit, loop=loop) protocol = StreamReaderProtocol(reader, loop=loop) > transport, _ = await loop.create_connection( lambda: protocol, host, port, **kwds) /usr/lib64/python3.11/asyncio/streams.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <_UnixSelectorEventLoop running=False closed=True debug=False> protocol_factory = . at 0x3fd81eac00> host = '8.8.8.8', port = 853 async def create_connection( self, protocol_factory, host=None, port=None, *, ssl=None, family=0, proto=0, flags=0, sock=None, local_addr=None, server_hostname=None, ssl_handshake_timeout=None, ssl_shutdown_timeout=None, happy_eyeballs_delay=None, interleave=None): """Connect to a TCP server. Create a streaming transport connection to a given internet host and port: socket family AF_INET or socket.AF_INET6 depending on host (or family if specified), socket type SOCK_STREAM. protocol_factory must be a callable returning a protocol instance. This method is a coroutine which will try to establish the connection in the background. When successful, the coroutine returns a (transport, protocol) pair. """ if server_hostname is not None and not ssl: raise ValueError('server_hostname is only meaningful with ssl') if server_hostname is None and ssl: # Use host as default for server_hostname. It is an error # if host is empty or not set, e.g. when an # already-connected socket was passed or when only a port # is given. To avoid this error, you can pass # server_hostname='' -- this will bypass the hostname # check. (This also means that if host is a numeric # IP/IPv6 address, we will attempt to verify that exact # address; this will probably fail, but it is possible to # create a certificate for a specific IP address, so we # don't judge it here.) 
if not host: raise ValueError('You must set server_hostname ' 'when using ssl without a host') server_hostname = host if ssl_handshake_timeout is not None and not ssl: raise ValueError( 'ssl_handshake_timeout is only meaningful with ssl') if ssl_shutdown_timeout is not None and not ssl: raise ValueError( 'ssl_shutdown_timeout is only meaningful with ssl') if sock is not None: _check_ssl_socket(sock) if happy_eyeballs_delay is not None and interleave is None: # If using happy eyeballs, default to interleave addresses by family interleave = 1 if host is not None or port is not None: if sock is not None: raise ValueError( 'host/port and sock can not be specified at the same time') infos = await self._ensure_resolved( (host, port), family=family, type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self) if not infos: raise OSError('getaddrinfo() returned empty list') if local_addr is not None: laddr_infos = await self._ensure_resolved( local_addr, family=family, type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self) if not laddr_infos: raise OSError('getaddrinfo() returned empty list') else: laddr_infos = None if interleave: infos = _interleave_addrinfos(infos, interleave) exceptions = [] if happy_eyeballs_delay is None: # not using happy eyeballs for addrinfo in infos: try: > sock = await self._connect_sock( exceptions, addrinfo, laddr_infos) /usr/lib64/python3.11/asyncio/base_events.py:1069: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <_UnixSelectorEventLoop running=False closed=True debug=False> exceptions = None addr_info = (, , 6, '', ('8.8.8.8', 853)) local_addr_infos = None async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None): """Create, bind and connect one socket.""" my_exceptions = [] exceptions.append(my_exceptions) family, type_, proto, _, address = addr_info sock = None try: sock = socket.socket(family=family, type=type_, proto=proto) sock.setblocking(False) if local_addr_infos is not None: for lfamily, _, _, _, laddr in local_addr_infos: # skip local addresses of different family if lfamily != family: continue try: sock.bind(laddr) break except OSError as exc: msg = ( f'error while attempting to bind on ' f'address {laddr!r}: ' f'{exc.strerror.lower()}' ) exc = OSError(exc.errno, msg) my_exceptions.append(exc) else: # all bind attempts failed if my_exceptions: raise my_exceptions.pop() else: raise OSError(f"no matching local address with {family=} found") > await self.sock_connect(sock, address) /usr/lib64/python3.11/asyncio/base_events.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <_UnixSelectorEventLoop running=False closed=True debug=False> sock = address = ('8.8.8.8', 853) async def sock_connect(self, sock, address): """Connect to a remote socket at address. This method is a coroutine. 
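# The frames above are asyncio.open_connection() trying to reach 8.8.8.8:853 while
# dnspython's asyncio backend races it against a 2-second asyncio.wait_for() budget.
# A self-contained sketch of that combination (not dnspython code) times out here
# for the same reason the test does: no outbound route from the build chroot.
import asyncio

async def connect_probe():
    reader, writer = await asyncio.wait_for(asyncio.open_connection("8.8.8.8", 853), 2)
    writer.close()
    await writer.wait_closed()

# asyncio.run(connect_probe())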
""" base_events._check_ssl_socket(sock) if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") if sock.family == socket.AF_INET or ( base_events._HAS_IPv6 and sock.family == socket.AF_INET6): resolved = await self._ensure_resolved( address, family=sock.family, type=sock.type, proto=sock.proto, loop=self, ) _, _, _, _, address = resolved[0] fut = self.create_future() self._sock_connect(fut, sock, address) try: > return await fut E asyncio.exceptions.CancelledError /usr/lib64/python3.11/asyncio/selector_events.py:634: CancelledError The above exception was the direct cause of the following exception: awaitable = , timeout = 2 async def _maybe_wait_for(awaitable, timeout): if timeout: try: > return await asyncio.wait_for(awaitable, timeout) dns/_asyncio_backend.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ fut = > timeout = 2 async def wait_for(fut, timeout): """Wait for the single Future or coroutine to complete, with timeout. Coroutine will be wrapped in Task. Returns result of the Future or coroutine. When a timeout occurs, it cancels the task and raises TimeoutError. To avoid the task cancellation, wrap it in shield(). If the wait is cancelled, the task is also cancelled. This function is a coroutine. """ loop = events.get_running_loop() if timeout is None: return await fut if timeout <= 0: fut = ensure_future(fut, loop=loop) if fut.done(): return fut.result() await _cancel_and_wait(fut, loop=loop) try: return fut.result() except exceptions.CancelledError as exc: raise exceptions.TimeoutError() from exc waiter = loop.create_future() timeout_handle = loop.call_later(timeout, _release_waiter, waiter) cb = functools.partial(_release_waiter, waiter) fut = ensure_future(fut, loop=loop) fut.add_done_callback(cb) try: # wait until the future completes or the timeout try: await waiter except exceptions.CancelledError: if fut.done(): return fut.result() else: fut.remove_done_callback(cb) # We must ensure that the task is not running # after wait_for() returns. # See https://bugs.python.org/issue32751 await _cancel_and_wait(fut, loop=loop) raise if fut.done(): return fut.result() else: fut.remove_done_callback(cb) # We must ensure that the task is not running # after wait_for() returns. 
# See https://bugs.python.org/issue32751 await _cancel_and_wait(fut, loop=loop) # In case task cancellation failed with some # exception, we should re-raise it # See https://bugs.python.org/issue40607 try: return fut.result() except exceptions.CancelledError as exc: > raise exceptions.TimeoutError() from exc E TimeoutError /usr/lib64/python3.11/asyncio/tasks.py:492: TimeoutError During handling of the above exception, another exception occurred: self = @unittest.skipIf(not _ssl_available, "SSL not available") def testQueryTLS(self): for address in query_addresses: qname = dns.name.from_text("dns.google.") async def run(): q = dns.message.make_query(qname, dns.rdatatype.A) return await dns.asyncquery.tls(q, address, timeout=2) > response = self.async_run(run) tests/test_async.py:349: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:347: in run return await dns.asyncquery.tls(q, address, timeout=2) dns/asyncquery.py:456: in tls cm = await backend.make_socket( dns/_asyncio_backend.py:152: in make_socket (r, w) = await _maybe_wait_for( _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ awaitable = , timeout = 2 async def _maybe_wait_for(awaitable, timeout): if timeout: try: return await asyncio.wait_for(awaitable, timeout) except asyncio.TimeoutError: > raise dns.exception.Timeout(timeout=timeout) E dns.exception.Timeout: The DNS operation timed out after 2.000 seconds dns/_asyncio_backend.py:52: Timeout ______________________ AsyncTests.testQueryTLSWithSocket _______________________ fut = > timeout = 2 async def wait_for(fut, timeout): """Wait for the single Future or coroutine to complete, with timeout. Coroutine will be wrapped in Task. Returns result of the Future or coroutine. When a timeout occurs, it cancels the task and raises TimeoutError. To avoid the task cancellation, wrap it in shield(). If the wait is cancelled, the task is also cancelled. This function is a coroutine. """ loop = events.get_running_loop() if timeout is None: return await fut if timeout <= 0: fut = ensure_future(fut, loop=loop) if fut.done(): return fut.result() await _cancel_and_wait(fut, loop=loop) try: return fut.result() except exceptions.CancelledError as exc: raise exceptions.TimeoutError() from exc waiter = loop.create_future() timeout_handle = loop.call_later(timeout, _release_waiter, waiter) cb = functools.partial(_release_waiter, waiter) fut = ensure_future(fut, loop=loop) fut.add_done_callback(cb) try: # wait until the future completes or the timeout try: await waiter except exceptions.CancelledError: if fut.done(): return fut.result() else: fut.remove_done_callback(cb) # We must ensure that the task is not running # after wait_for() returns. # See https://bugs.python.org/issue32751 await _cancel_and_wait(fut, loop=loop) raise if fut.done(): return fut.result() else: fut.remove_done_callback(cb) # We must ensure that the task is not running # after wait_for() returns. 
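# The dns.exception.Timeout above comes from dnspython's _maybe_wait_for() translating
# asyncio.TimeoutError. A hedged sketch of the same DNS-over-TLS query outside the
# test harness (address taken from the log; port 853 is the dns.asyncquery.tls default):
import asyncio
import dns.asyncquery
import dns.exception
import dns.message
import dns.rdatatype

async def dot_query(address="8.8.8.8"):
    q = dns.message.make_query("dns.google.", dns.rdatatype.A)
    try:
        return await dns.asyncquery.tls(q, address, timeout=2)
    except dns.exception.Timeout:
        return None  # expected in this chroot: the TCP connect never completes

# asyncio.run(dot_query())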
# See https://bugs.python.org/issue32751 await _cancel_and_wait(fut, loop=loop) # In case task cancellation failed with some # exception, we should re-raise it # See https://bugs.python.org/issue40607 try: > return fut.result() /usr/lib64/python3.11/asyncio/tasks.py:490: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = '8.8.8.8', port = 853, limit = 65536 kwds = {'family': , 'local_addr': None, 'proto': 0, 'server_hostname': None, ...} loop = <_UnixSelectorEventLoop running=False closed=True debug=False> reader = async def open_connection(host=None, port=None, *, limit=_DEFAULT_LIMIT, **kwds): """A wrapper for create_connection() returning a (reader, writer) pair. The reader returned is a StreamReader instance; the writer is a StreamWriter instance. The arguments are all the usual arguments to create_connection() except protocol_factory; most common are positional host and port, with various optional keyword arguments following. Additional optional keyword arguments are loop (to set the event loop instance to use) and limit (to set the buffer limit passed to the StreamReader). (If you want to customize the StreamReader and/or StreamReaderProtocol classes, just copy the code -- there's really nothing special here except some convenience.) """ loop = events.get_running_loop() reader = StreamReader(limit=limit, loop=loop) protocol = StreamReaderProtocol(reader, loop=loop) > transport, _ = await loop.create_connection( lambda: protocol, host, port, **kwds) /usr/lib64/python3.11/asyncio/streams.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <_UnixSelectorEventLoop running=False closed=True debug=False> protocol_factory = . at 0x3fd81eaa20> host = '8.8.8.8', port = 853 async def create_connection( self, protocol_factory, host=None, port=None, *, ssl=None, family=0, proto=0, flags=0, sock=None, local_addr=None, server_hostname=None, ssl_handshake_timeout=None, ssl_shutdown_timeout=None, happy_eyeballs_delay=None, interleave=None): """Connect to a TCP server. Create a streaming transport connection to a given internet host and port: socket family AF_INET or socket.AF_INET6 depending on host (or family if specified), socket type SOCK_STREAM. protocol_factory must be a callable returning a protocol instance. This method is a coroutine which will try to establish the connection in the background. When successful, the coroutine returns a (transport, protocol) pair. """ if server_hostname is not None and not ssl: raise ValueError('server_hostname is only meaningful with ssl') if server_hostname is None and ssl: # Use host as default for server_hostname. It is an error # if host is empty or not set, e.g. when an # already-connected socket was passed or when only a port # is given. To avoid this error, you can pass # server_hostname='' -- this will bypass the hostname # check. (This also means that if host is a numeric # IP/IPv6 address, we will attempt to verify that exact # address; this will probably fail, but it is possible to # create a certificate for a specific IP address, so we # don't judge it here.) 
if not host: raise ValueError('You must set server_hostname ' 'when using ssl without a host') server_hostname = host if ssl_handshake_timeout is not None and not ssl: raise ValueError( 'ssl_handshake_timeout is only meaningful with ssl') if ssl_shutdown_timeout is not None and not ssl: raise ValueError( 'ssl_shutdown_timeout is only meaningful with ssl') if sock is not None: _check_ssl_socket(sock) if happy_eyeballs_delay is not None and interleave is None: # If using happy eyeballs, default to interleave addresses by family interleave = 1 if host is not None or port is not None: if sock is not None: raise ValueError( 'host/port and sock can not be specified at the same time') infos = await self._ensure_resolved( (host, port), family=family, type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self) if not infos: raise OSError('getaddrinfo() returned empty list') if local_addr is not None: laddr_infos = await self._ensure_resolved( local_addr, family=family, type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self) if not laddr_infos: raise OSError('getaddrinfo() returned empty list') else: laddr_infos = None if interleave: infos = _interleave_addrinfos(infos, interleave) exceptions = [] if happy_eyeballs_delay is None: # not using happy eyeballs for addrinfo in infos: try: > sock = await self._connect_sock( exceptions, addrinfo, laddr_infos) /usr/lib64/python3.11/asyncio/base_events.py:1069: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <_UnixSelectorEventLoop running=False closed=True debug=False> exceptions = None addr_info = (, , 6, '', ('8.8.8.8', 853)) local_addr_infos = None async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None): """Create, bind and connect one socket.""" my_exceptions = [] exceptions.append(my_exceptions) family, type_, proto, _, address = addr_info sock = None try: sock = socket.socket(family=family, type=type_, proto=proto) sock.setblocking(False) if local_addr_infos is not None: for lfamily, _, _, _, laddr in local_addr_infos: # skip local addresses of different family if lfamily != family: continue try: sock.bind(laddr) break except OSError as exc: msg = ( f'error while attempting to bind on ' f'address {laddr!r}: ' f'{exc.strerror.lower()}' ) exc = OSError(exc.errno, msg) my_exceptions.append(exc) else: # all bind attempts failed if my_exceptions: raise my_exceptions.pop() else: raise OSError(f"no matching local address with {family=} found") > await self.sock_connect(sock, address) /usr/lib64/python3.11/asyncio/base_events.py:973: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <_UnixSelectorEventLoop running=False closed=True debug=False> sock = address = ('8.8.8.8', 853) async def sock_connect(self, sock, address): """Connect to a remote socket at address. This method is a coroutine. 
""" base_events._check_ssl_socket(sock) if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") if sock.family == socket.AF_INET or ( base_events._HAS_IPv6 and sock.family == socket.AF_INET6): resolved = await self._ensure_resolved( address, family=sock.family, type=sock.type, proto=sock.proto, loop=self, ) _, _, _, _, address = resolved[0] fut = self.create_future() self._sock_connect(fut, sock, address) try: > return await fut E asyncio.exceptions.CancelledError /usr/lib64/python3.11/asyncio/selector_events.py:634: CancelledError The above exception was the direct cause of the following exception: awaitable = , timeout = 2 async def _maybe_wait_for(awaitable, timeout): if timeout: try: > return await asyncio.wait_for(awaitable, timeout) dns/_asyncio_backend.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ fut = > timeout = 2 async def wait_for(fut, timeout): """Wait for the single Future or coroutine to complete, with timeout. Coroutine will be wrapped in Task. Returns result of the Future or coroutine. When a timeout occurs, it cancels the task and raises TimeoutError. To avoid the task cancellation, wrap it in shield(). If the wait is cancelled, the task is also cancelled. This function is a coroutine. """ loop = events.get_running_loop() if timeout is None: return await fut if timeout <= 0: fut = ensure_future(fut, loop=loop) if fut.done(): return fut.result() await _cancel_and_wait(fut, loop=loop) try: return fut.result() except exceptions.CancelledError as exc: raise exceptions.TimeoutError() from exc waiter = loop.create_future() timeout_handle = loop.call_later(timeout, _release_waiter, waiter) cb = functools.partial(_release_waiter, waiter) fut = ensure_future(fut, loop=loop) fut.add_done_callback(cb) try: # wait until the future completes or the timeout try: await waiter except exceptions.CancelledError: if fut.done(): return fut.result() else: fut.remove_done_callback(cb) # We must ensure that the task is not running # after wait_for() returns. # See https://bugs.python.org/issue32751 await _cancel_and_wait(fut, loop=loop) raise if fut.done(): return fut.result() else: fut.remove_done_callback(cb) # We must ensure that the task is not running # after wait_for() returns. 
# See https://bugs.python.org/issue32751 await _cancel_and_wait(fut, loop=loop) # In case task cancellation failed with some # exception, we should re-raise it # See https://bugs.python.org/issue40607 try: return fut.result() except exceptions.CancelledError as exc: > raise exceptions.TimeoutError() from exc E TimeoutError /usr/lib64/python3.11/asyncio/tasks.py:492: TimeoutError During handling of the above exception, another exception occurred: self = @unittest.skipIf(not _ssl_available, "SSL not available") def testQueryTLSWithSocket(self): for address in query_addresses: qname = dns.name.from_text("dns.google.") async def run(): ssl_context = ssl.create_default_context() ssl_context.check_hostname = False async with await self.backend.make_socket( dns.inet.af_for_address(address), socket.SOCK_STREAM, 0, None, (address, 853), 2, ssl_context, None, ) as s: # for basic coverage await s.getsockname() q = dns.message.make_query(qname, dns.rdatatype.A) return await dns.asyncquery.tls(q, "8.8.8.8", sock=s, timeout=2) > response = self.async_run(run) tests/test_async.py:381: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:366: in run async with await self.backend.make_socket( dns/_asyncio_backend.py:152: in make_socket (r, w) = await _maybe_wait_for( _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ awaitable = , timeout = 2 async def _maybe_wait_for(awaitable, timeout): if timeout: try: return await asyncio.wait_for(awaitable, timeout) except asyncio.TimeoutError: > raise dns.exception.Timeout(timeout=timeout) E dns.exception.Timeout: The DNS operation timed out after 2.000 seconds dns/_asyncio_backend.py:52: Timeout ____________________________ AsyncTests.testResolve ____________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testResolve(self): async def run(): answer = await dns.asyncresolver.resolve("dns.google.", "A") return set([rdata.address for rdata in answer]) > seen = self.async_run(run) tests/test_async.py:178: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:175: in run answer = await dns.asyncresolver.resolve("dns.google.", "A") dns/asyncresolver.py:223: in resolve return await get_default_resolver().resolve( dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ________________________ AsyncTests.testResolveAddress _________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testResolveAddress(self): async def run(): return await dns.asyncresolver.resolve_address("8.8.8.8") > answer = self.async_run(run) tests/test_async.py:186: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:184: in run return await dns.asyncresolver.resolve_address("8.8.8.8") dns/asyncresolver.py:246: in resolve_address return await get_default_resolver().resolve_address(ipaddr, *args, **kwargs) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. 
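# read_resolv_conf() (quoted above) accepts either a filename or an open file object,
# so a resolver can be configured without /etc/resolv.conf at all -- a sketch, not
# part of the test suite, using only items the docstring lists as supported:
import io
import dns.resolver

res = dns.resolver.Resolver(configure=False)   # skip the automatic /etc/resolv.conf read
res.read_resolv_conf(io.StringIO("nameserver 8.8.8.8\nsearch example.net\noptions ndots:1\n"))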
> raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration __________________________ AsyncTests.testResolverDOH __________________________ self = @unittest.skipIf(not dns.query._have_httpx, "httpx not available") def testResolverDOH(self): if self.backend.name() == "curio": self.skipTest("anyio dropped curio support") async def run(): res = dns.asyncresolver.Resolver(configure=False) res.nameservers = ["https://dns.google/dns-query"] answer = await res.resolve("dns.google", "A", backend=self.backend) seen = set([rdata.address for rdata in answer]) self.assertTrue("8.8.8.8" in seen) self.assertTrue("8.8.4.4" in seen) > self.async_run(run) tests/test_async.py:514: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:509: in run answer = await res.resolve("dns.google", "A", backend=self.backend) dns/asyncresolver.py:89: in resolve timeout = self._compute_timeout(start, lifetime, resolution.errors) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = start = 1681629357.3086991, lifetime = 5.0 errors = [('https://dns.google/dns-query', False, 53, ConnectError('[Errno -3] Temporary failure in name resolution'), None), (...e), ('https://dns.google/dns-query', False, 53, ConnectError('[Errno -3] Temporary failure in name resolution'), None)] def _compute_timeout( self, start: float, lifetime: Optional[float] = None, errors: Optional[List[ErrorTuple]] = None, ) -> float: lifetime = self.lifetime if lifetime is None else lifetime now = time.time() duration = now - start if errors is None: errors = [] if duration < 0: if duration < -1: # Time going backwards is bad. Just give up. raise LifetimeTimeout(timeout=duration, errors=errors) else: # Time went backwards, but only a little. This can # happen, e.g. under vmware with older linux kernels. # Pretend it didn't happen. duration = 0 if duration >= lifetime: > raise LifetimeTimeout(timeout=duration, errors=errors) E dns.resolver.LifetimeTimeout: The resolution lifetime expired after 5.757 seconds: Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution dns/resolver.py:988: LifetimeTimeout _________________________ AsyncTests.testZoneForName1 __________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
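# The LifetimeTimeout above accumulates per-attempt errors until the 5-second default
# lifetime is spent. In isolation, the resolver configuration the quoted testResolverDOH
# builds is roughly the following sketch; it needs httpx and network access to succeed.
import asyncio
import dns.asyncresolver

async def doh_resolve():
    res = dns.asyncresolver.Resolver(configure=False)
    res.nameservers = ["https://dns.google/dns-query"]
    answer = await res.resolve("dns.google", "A")
    return {rdata.address for rdata in answer}

# asyncio.run(doh_resolve())  # raises dns.resolver.LifetimeTimeout in this chroot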
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName1(self): async def run(): name = dns.name.from_text("www.dnspython.org.") return await dns.asyncresolver.zone_for_name(name) ezname = dns.name.from_text("dnspython.org.") > zname = self.async_run(run) tests/test_async.py:223: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:220: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _________________________ AsyncTests.testZoneForName2 __________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName2(self): async def run(): name = dns.name.from_text("a.b.www.dnspython.org.") return await dns.asyncresolver.zone_for_name(name) ezname = dns.name.from_text("dnspython.org.") > zname = self.async_run(run) tests/test_async.py:232: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:229: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _________________________ AsyncTests.testZoneForName3 __________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName3(self): async def run(): name = dns.name.from_text("dnspython.org.") return await dns.asyncresolver.zone_for_name(name) ezname = dns.name.from_text("dnspython.org.") > zname = self.async_run(run) tests/test_async.py:241: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:238: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _________________________ AsyncTests.testZoneForName4 __________________________ def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName4(self): def bad(): name = dns.name.from_text("dnspython.org", None) async def run(): return await dns.asyncresolver.zone_for_name(name) self.async_run(run) > self.assertRaises(dns.resolver.NotAbsolute, bad) tests/test_async.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:251: in bad self.async_run(run) tests/test_async.py:171: in async_run return asyncio.run(afunc()) /usr/lib64/python3.11/asyncio/runners.py:190: in run return runner.run(main) /usr/lib64/python3.11/asyncio/runners.py:118: in run return self._loop.run_until_complete(task) /usr/lib64/python3.11/asyncio/base_events.py:653: in run_until_complete return future.result() tests/test_async.py:249: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ____________________ TrioAsyncTests.testCanonicalNameCNAME _____________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
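# zone_for_name() (quoted in the testZoneForName cases above) locates the enclosing
# zone for a name and requires an absolute name -- testZoneForName4 expects
# dns.resolver.NotAbsolute otherwise. A minimal sketch, assuming a working resolver:
import asyncio
import dns.asyncresolver
import dns.name

async def enclosing_zone():
    name = dns.name.from_text("www.dnspython.org.")   # note the trailing dot
    return await dns.asyncresolver.zone_for_name(name)

# asyncio.run(enclosing_zone())  # needs resolver configuration, absent in this chroot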
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameCNAME(self): name = dns.name.from_text("www.dnspython.org") cname = dns.name.from_text("dmfrjf4ips8xa.cloudfront.net") async def run(): return await dns.asyncresolver.canonical_name(name) > self.assertEqual(self.async_run(run), cname) tests/test_async.py:205: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:203: in run return await dns.asyncresolver.canonical_name(name) dns/asyncresolver.py:256: in canonical_name return await get_default_resolver().canonical_name(name) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ___________________ TrioAsyncTests.testCanonicalNameDangling ___________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = @unittest.skipIf(_systemd_resolved_present, "systemd-resolved in use") def testCanonicalNameDangling(self): name = dns.name.from_text("dangling-cname.dnspython.org") cname = dns.name.from_text("dangling-target.dnspython.org") async def run(): return await dns.asyncresolver.canonical_name(name) > self.assertEqual(self.async_run(run), cname) tests/test_async.py:215: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:213: in run return await dns.asyncresolver.canonical_name(name) dns/asyncresolver.py:256: in canonical_name return await get_default_resolver().canonical_name(name) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ___________________ TrioAsyncTests.testCanonicalNameNoCNAME ____________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameNoCNAME(self): cname = dns.name.from_text("www.google.com") async def run(): return await dns.asyncresolver.canonical_name("www.google.com") > self.assertEqual(self.async_run(run), cname) tests/test_async.py:196: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:194: in run return await dns.asyncresolver.canonical_name("www.google.com") dns/asyncresolver.py:256: in canonical_name return await get_default_resolver().canonical_name(name) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _______________________ TrioAsyncTests.testDOHGetRequest _______________________ map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: timeout_or_inf = float("inf") if timeout is None else timeout exc_map = { trio.TooSlowError: ConnectTimeout, trio.BrokenResourceError: ConnectError, OSError: ConnectError, } # Trio supports 'local_address' from 0.16.1 onwards. # We only include the keyword argument if a local_address # argument has been passed. 
kwargs: dict = {} if local_address is None else {"local_address": local_address} with map_exceptions(exc_map): with trio.fail_after(timeout_or_inf): > stream: trio.abc.Stream = await trio.open_tcp_stream( host=host, port=port, **kwargs ) /usr/lib/python3.11/site-packages/httpcore/backends/trio.py:122: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'cloudflare-dns.com', port = 443 async def open_tcp_stream( host, port, *, happy_eyeballs_delay=DEFAULT_DELAY, local_address=None ): """Connect to the given host and port over TCP. If the given ``host`` has multiple IP addresses associated with it, then we have a problem: which one do we use? One approach would be to attempt to connect to the first one, and then if that fails, attempt to connect to the second one ... until we've tried all of them. But the problem with this is that if the first IP address is unreachable (for example, because it's an IPv6 address and our network discards IPv6 packets), then we might end up waiting tens of seconds for the first connection attempt to timeout before we try the second address. Another approach would be to attempt to connect to all of the addresses at the same time, in parallel, and then use whichever connection succeeds first, abandoning the others. This would be fast, but create a lot of unnecessary load on the network and the remote server. This function strikes a balance between these two extremes: it works its way through the available addresses one at a time, like the first approach; but, if ``happy_eyeballs_delay`` seconds have passed and it's still waiting for an attempt to succeed or fail, then it gets impatient and starts the next connection attempt in parallel. As soon as any one connection attempt succeeds, all the other attempts are cancelled. This avoids unnecessary load because most connections will succeed after just one or two attempts, but if one of the addresses is unreachable then it doesn't slow us down too much. This is known as a "happy eyeballs" algorithm, and our particular variant is modelled after how Chrome connects to webservers; see `RFC 6555 `__ for more details. Args: host (str or bytes): The host to connect to. Can be an IPv4 address, IPv6 address, or a hostname. port (int): The port to connect to. happy_eyeballs_delay (float): How many seconds to wait for each connection attempt to succeed or fail before getting impatient and starting another one in parallel. Set to `math.inf` if you want to limit to only one connection attempt at a time (like :func:`socket.create_connection`). Default: 0.25 (250 ms). local_address (None or str): The local IP address or hostname to use as the source for outgoing connections. If ``None``, we let the OS pick the source IP. This is useful in some exotic networking configurations where your host has multiple IP addresses, and you want to force the use of a specific one. Note that if you pass an IPv4 ``local_address``, then you won't be able to connect to IPv6 hosts, and vice-versa. If you want to take advantage of this to force the use of IPv4 or IPv6 without specifying an exact source address, you can use the IPv4 wildcard address ``local_address="0.0.0.0"``, or the IPv6 wildcard address ``local_address="::"``. Returns: SocketStream: a :class:`~trio.abc.Stream` connected to the given server. Raises: OSError: if the connection fails. 
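# Minimal use of the trio API documented above (not dnspython/httpcore code); per the
# docstring, happy_eyeballs_delay=math.inf serialises the per-address connection
# attempts instead of starting them in parallel after 250 ms.
import math
import trio

async def tcp_probe():
    stream = await trio.open_tcp_stream("cloudflare-dns.com", 443,
                                        happy_eyeballs_delay=math.inf)
    await stream.aclose()

# trio.run(tcp_probe)  # fails here: name resolution is unavailable in the chroot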
See also: open_ssl_over_tcp_stream """ # To keep our public API surface smaller, rule out some cases that # getaddrinfo will accept in some circumstances, but that act weird or # have non-portable behavior or are just plain not useful. # No type check on host though b/c we want to allow bytes-likes. if host is None: raise ValueError("host cannot be None") if not isinstance(port, int): raise TypeError("port must be int, not {!r}".format(port)) if happy_eyeballs_delay is None: happy_eyeballs_delay = DEFAULT_DELAY > targets = await getaddrinfo(host, port, type=SOCK_STREAM) /usr/lib/python3.11/site-packages/trio/_highlevel_open_tcp_stream.py:259: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = b'cloudflare-dns.com', port = 443, family = 0 type = , proto = 0, flags = 0 async def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Look up a numeric address given a name. Arguments and return values are identical to :func:`socket.getaddrinfo`, except that this version is async. Also, :func:`trio.socket.getaddrinfo` correctly uses IDNA 2008 to process non-ASCII domain names. (:func:`socket.getaddrinfo` uses IDNA 2003, which can give the wrong result in some cases and cause you to connect to a different host than the one you intended; see `bpo-17305 `__.) This function's behavior can be customized using :func:`set_custom_hostname_resolver`. """ # If host and port are numeric, then getaddrinfo doesn't block and we can # skip the whole thread thing, which seems worthwhile. So we try first # with the _NUMERIC_ONLY flags set, and then only spawn a thread if that # fails with EAI_NONAME: def numeric_only_failure(exc): return ( isinstance(exc, _stdlib_socket.gaierror) and exc.errno == _stdlib_socket.EAI_NONAME ) async with _try_sync(numeric_only_failure): return _stdlib_socket.getaddrinfo( host, port, family, type, proto, flags | _NUMERIC_ONLY ) # That failed; it's a real hostname. We better use a thread. # # Also, it might be a unicode hostname, in which case we want to do our # own encoding using the idna module, rather than letting Python do # it. (Python will use the old IDNA 2003 standard, and possibly get the # wrong answer - see bpo-17305). However, the idna module is picky, and # will refuse to process some valid hostname strings, like "::1". So if # it's already ascii, we pass it through; otherwise, we encode it to. if isinstance(host, str): try: host = host.encode("ascii") except UnicodeEncodeError: # UTS-46 defines various normalizations; in particular, by default # idna.encode will error out if the hostname has Capital Letters # in it; with uts46=True it will lowercase them instead. host = _idna.encode(host, uts46=True) hr = _resolver.get(None) if hr is not None: return await hr.getaddrinfo(host, port, family, type, proto, flags) else: > return await trio.to_thread.run_sync( _stdlib_socket.getaddrinfo, host, port, family, type, proto, flags, cancellable=True, ) /usr/lib/python3.11/site-packages/trio/_socket.py:183: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ sync_fn = , cancellable = True limiter = args = (b'cloudflare-dns.com', 443, 0, , 0, 0) name = 'trio.to_thread.run_sync-0' @enable_ki_protection async def to_thread_run_sync(sync_fn, *args, cancellable=False, limiter=None): """Convert a blocking operation into an async operation using a thread. 
These two lines are equivalent:: sync_fn(*args) await trio.to_thread.run_sync(sync_fn, *args) except that if ``sync_fn`` takes a long time, then the first line will block the Trio loop while it runs, while the second line allows other Trio tasks to continue working while ``sync_fn`` runs. This is accomplished by pushing the call to ``sync_fn(*args)`` off into a worker thread. From inside the worker thread, you can get back into Trio using the functions in `trio.from_thread`. Args: sync_fn: An arbitrary synchronous callable. *args: Positional arguments to pass to sync_fn. If you need keyword arguments, use :func:`functools.partial`. cancellable (bool): Whether to allow cancellation of this operation. See discussion below. limiter (None, or CapacityLimiter-like object): An object used to limit the number of simultaneous threads. Most commonly this will be a `~trio.CapacityLimiter`, but it could be anything providing compatible :meth:`~trio.CapacityLimiter.acquire_on_behalf_of` and :meth:`~trio.CapacityLimiter.release_on_behalf_of` methods. This function will call ``acquire_on_behalf_of`` before starting the thread, and ``release_on_behalf_of`` after the thread has finished. If None (the default), uses the default `~trio.CapacityLimiter`, as returned by :func:`current_default_thread_limiter`. **Cancellation handling**: Cancellation is a tricky issue here, because neither Python nor the operating systems it runs on provide any general mechanism for cancelling an arbitrary synchronous function running in a thread. This function will always check for cancellation on entry, before starting the thread. But once the thread is running, there are two ways it can handle being cancelled: * If ``cancellable=False``, the function ignores the cancellation and keeps going, just like if we had called ``sync_fn`` synchronously. This is the default behavior. * If ``cancellable=True``, then this function immediately raises `~trio.Cancelled`. In this case **the thread keeps running in background** – we just abandon it to do whatever it's going to do, and silently discard any return value or errors that it raises. Only use this if you know that the operation is safe and side-effect free. (For example: :func:`trio.socket.getaddrinfo` uses a thread with ``cancellable=True``, because it doesn't really affect anything if a stray hostname lookup keeps running in the background.) The ``limiter`` is only released after the thread has *actually* finished – which in the case of cancellation may be some time after this function has returned. If :func:`trio.run` finishes before the thread does, then the limiter release method will never be called at all. .. warning:: You should not use this function to call long-running CPU-bound functions! In addition to the usual GIL-related reasons why using threads for CPU-bound work is not very effective in Python, there is an additional problem: on CPython, `CPU-bound threads tend to "starve out" IO-bound threads `__, so using threads for CPU-bound work is likely to adversely affect the main thread running Trio. If you need to do this, you're better off using a worker process, or perhaps PyPy (which still has a GIL, but may do a better job of fairly allocating CPU time between threads). Returns: Whatever ``sync_fn(*args)`` returns. Raises: Exception: Whatever ``sync_fn(*args)`` raises. 
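# The frame above pushes the blocking stdlib getaddrinfo() onto a worker thread;
# cancellable=True (as in the quoted call) lets Trio abandon the lookup if the
# surrounding scope is cancelled. A stripped-down sketch of the same pattern:
import socket
import trio

async def threaded_lookup(host, port):
    return await trio.to_thread.run_sync(socket.getaddrinfo, host, port,
                                         cancellable=True)

# trio.run(threaded_lookup, "cloudflare-dns.com", 443)  # gaierror -3 in this environment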
""" await trio.lowlevel.checkpoint_if_cancelled() cancellable = bool(cancellable) # raise early if cancellable.__bool__ raises if limiter is None: limiter = current_default_thread_limiter() # Holds a reference to the task that's blocked in this function waiting # for the result – or None if this function was cancelled and we should # discard the result. task_register = [trio.lowlevel.current_task()] name = f"trio.to_thread.run_sync-{next(_thread_counter)}" placeholder = ThreadPlaceholder(name) # This function gets scheduled into the Trio run loop to deliver the # thread's result. def report_back_in_trio_thread_fn(result): def do_release_then_return_result(): # release_on_behalf_of is an arbitrary user-defined method, so it # might raise an error. If it does, we want that error to # replace the regular return value, and if the regular return was # already an exception then we want them to chain. try: return result.unwrap() finally: limiter.release_on_behalf_of(placeholder) result = outcome.capture(do_release_then_return_result) if task_register[0] is not None: trio.lowlevel.reschedule(task_register[0], result) current_trio_token = trio.lowlevel.current_trio_token() def worker_fn(): current_async_library_cvar.set(None) TOKEN_LOCAL.token = current_trio_token try: ret = sync_fn(*args) if inspect.iscoroutine(ret): # Manually close coroutine to avoid RuntimeWarnings ret.close() raise TypeError( "Trio expected a sync function, but {!r} appears to be " "asynchronous".format(getattr(sync_fn, "__qualname__", sync_fn)) ) return ret finally: del TOKEN_LOCAL.token context = contextvars.copy_context() contextvars_aware_worker_fn = functools.partial(context.run, worker_fn) def deliver_worker_fn_result(result): try: current_trio_token.run_sync_soon(report_back_in_trio_thread_fn, result) except trio.RunFinishedError: # The entire run finished, so the task we're trying to contact is # certainly long gone -- it must have been cancelled and abandoned # us. pass await limiter.acquire_on_behalf_of(placeholder) try: start_thread_soon(contextvars_aware_worker_fn, deliver_worker_fn_result) except: limiter.release_on_behalf_of(placeholder) raise def abort(_): if cancellable: task_register[0] = None return trio.lowlevel.Abort.SUCCEEDED else: return trio.lowlevel.Abort.FAILED > return await trio.lowlevel.wait_task_rescheduled(abort) /usr/lib/python3.11/site-packages/trio/_threads.py:215: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ abort_func = .abort at 0x3fd9cae2a0> async def wait_task_rescheduled(abort_func): """Put the current task to sleep, with cancellation support. This is the lowest-level API for blocking in Trio. Every time a :class:`~trio.lowlevel.Task` blocks, it does so by calling this function (usually indirectly via some higher-level API). This is a tricky interface with no guard rails. If you can use :class:`ParkingLot` or the built-in I/O wait functions instead, then you should. Generally the way it works is that before calling this function, you make arrangements for "someone" to call :func:`reschedule` on the current task at some later point. Then you call :func:`wait_task_rescheduled`, passing in ``abort_func``, an "abort callback". (Terminology: in Trio, "aborting" is the process of attempting to interrupt a blocked task to deliver a cancellation.) There are two possibilities for what happens next: 1. 
"Someone" calls :func:`reschedule` on the current task, and :func:`wait_task_rescheduled` returns or raises whatever value or error was passed to :func:`reschedule`. 2. The call's context transitions to a cancelled state (e.g. due to a timeout expiring). When this happens, the ``abort_func`` is called. Its interface looks like:: def abort_func(raise_cancel): ... return trio.lowlevel.Abort.SUCCEEDED # or FAILED It should attempt to clean up any state associated with this call, and in particular, arrange that :func:`reschedule` will *not* be called later. If (and only if!) it is successful, then it should return :data:`Abort.SUCCEEDED`, in which case the task will automatically be rescheduled with an appropriate :exc:`~trio.Cancelled` error. Otherwise, it should return :data:`Abort.FAILED`. This means that the task can't be cancelled at this time, and still has to make sure that "someone" eventually calls :func:`reschedule`. At that point there are again two possibilities. You can simply ignore the cancellation altogether: wait for the operation to complete and then reschedule and continue as normal. (For example, this is what :func:`trio.to_thread.run_sync` does if cancellation is disabled.) The other possibility is that the ``abort_func`` does succeed in cancelling the operation, but for some reason isn't able to report that right away. (Example: on Windows, it's possible to request that an async ("overlapped") I/O operation be cancelled, but this request is *also* asynchronous – you don't find out until later whether the operation was actually cancelled or not.) To report a delayed cancellation, then you should reschedule the task yourself, and call the ``raise_cancel`` callback passed to ``abort_func`` to raise a :exc:`~trio.Cancelled` (or possibly :exc:`KeyboardInterrupt`) exception into this task. Either of the approaches sketched below can work:: # Option 1: # Catch the exception from raise_cancel and inject it into the task. # (This is what Trio does automatically for you if you return # Abort.SUCCEEDED.) trio.lowlevel.reschedule(task, outcome.capture(raise_cancel)) # Option 2: # wait to be woken by "someone", and then decide whether to raise # the error from inside the task. outer_raise_cancel = None def abort(inner_raise_cancel): nonlocal outer_raise_cancel outer_raise_cancel = inner_raise_cancel TRY_TO_CANCEL_OPERATION() return trio.lowlevel.Abort.FAILED await wait_task_rescheduled(abort) if OPERATION_WAS_SUCCESSFULLY_CANCELLED: # raises the error outer_raise_cancel() In any case it's guaranteed that we only call the ``abort_func`` at most once per call to :func:`wait_task_rescheduled`. Sometimes, it's useful to be able to share some mutable sleep-related data between the sleeping task, the abort function, and the waking task. You can use the sleeping task's :data:`~Task.custom_sleep_data` attribute to store this data, and Trio won't touch it, except to make sure that it gets cleared when the task is rescheduled. .. warning:: If your ``abort_func`` raises an error, or returns any value other than :data:`Abort.SUCCEEDED` or :data:`Abort.FAILED`, then Trio will crash violently. Be careful! Similarly, it is entirely possible to deadlock a Trio program by failing to reschedule a blocked task, or cause havoc by calling :func:`reschedule` too many times. Remember what we said up above about how you should use a higher-level API if at all possible? 
""" > return (await _async_yield(WaitTaskRescheduled(abort_func))).unwrap() /usr/lib/python3.11/site-packages/trio/_core/_traps.py:166: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def unwrap(self): self._set_unwrapped() # Tracebacks show the 'raise' line below out of context, so let's give # this variable a name that makes sense out of context. captured_error = self.error try: > raise captured_error /usr/lib/python3.11/site-packages/outcome/_impl.py:138: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def do_release_then_return_result(): # release_on_behalf_of is an arbitrary user-defined method, so it # might raise an error. If it does, we want that error to # replace the regular return value, and if the regular return was # already an exception then we want them to chain. try: > return result.unwrap() /usr/lib/python3.11/site-packages/trio/_threads.py:161: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def unwrap(self): self._set_unwrapped() # Tracebacks show the 'raise' line below out of context, so let's give # this variable a name that makes sense out of context. captured_error = self.error try: > raise captured_error /usr/lib/python3.11/site-packages/outcome/_impl.py:138: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def worker_fn(): current_async_library_cvar.set(None) TOKEN_LOCAL.token = current_trio_token try: > ret = sync_fn(*args) /usr/lib/python3.11/site-packages/trio/_threads.py:175: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = b'cloudflare-dns.com', port = 443, family = 0 type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. 
addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request( self, request: Request, ) -> Response: assert isinstance(request.stream, AsyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = await self._pool.handle_async_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:353: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: response = await connection.handle_async_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. async with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() await self._attempt_to_acquire_connection(status) except BaseException as exc: await self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." 
) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: > response = await connection.handle_async_request(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: stream = await self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import AsyncHTTP2Connection self._connection = AsyncHTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = AsyncHTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: > stream = await self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def _connect(self, request: Request) -> AsyncNetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": self._local_address, "timeout": timeout, } async with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = await self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: Optional[float] = None, local_address: Optional[str] = None, ) -> AsyncNetworkStream: await self._init_backend() > return await self._backend.connect_tcp( host, port, timeout=timeout, 
local_address=local_address ) /usr/lib/python3.11/site-packages/httpcore/backends/auto.py:29: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: timeout_or_inf = float("inf") if timeout is None else timeout exc_map = { trio.TooSlowError: ConnectTimeout, trio.BrokenResourceError: ConnectError, OSError: ConnectError, } # Trio supports 'local_address' from 0.16.1 onwards. # We only include the keyword argument if a local_address # argument has been passed. kwargs: dict = {} if local_address is None else {"local_address": local_address} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/trio.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = @unittest.skipIf(not dns.query._have_httpx, "httpx not available") def testDOHGetRequest(self): if self.backend.name() == "curio": self.skipTest("anyio dropped curio support") async def run(): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) r = await dns.asyncquery.https(q, nameserver_url, post=False, timeout=4) self.assertTrue(q.is_response(r)) > self.async_run(run) tests/test_async.py:468: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:465: in run r = await dns.asyncquery.https(q, nameserver_url, post=False, timeout=4) dns/asyncquery.py:554: in https response = await the_client.get( /usr/lib/python3.11/site-packages/httpx/_client.py:1751: in get return await self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:1527: in request return await self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:1614: in send response = await self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:1642: in _send_handling_auth response = await self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:1679: in _send_handling_redirects response = await self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1716: in _send_single_request response = await transport.handle_async_request(request) 
/usr/lib/python3.11/site-packages/httpx/_transports/default.py:352: in handle_async_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError ____________________ TrioAsyncTests.testDOHGetRequestHttp1 _____________________ map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: timeout_or_inf = float("inf") if timeout is None else timeout exc_map = { trio.TooSlowError: ConnectTimeout, trio.BrokenResourceError: ConnectError, OSError: ConnectError, } # Trio supports 'local_address' from 0.16.1 onwards. # We only include the keyword argument if a local_address # argument has been passed. kwargs: dict = {} if local_address is None else {"local_address": local_address} with map_exceptions(exc_map): with trio.fail_after(timeout_or_inf): > stream: trio.abc.Stream = await trio.open_tcp_stream( host=host, port=port, **kwargs ) /usr/lib/python3.11/site-packages/httpcore/backends/trio.py:122: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'dns.google', port = 443 async def open_tcp_stream( host, port, *, happy_eyeballs_delay=DEFAULT_DELAY, local_address=None ): """Connect to the given host and port over TCP. If the given ``host`` has multiple IP addresses associated with it, then we have a problem: which one do we use? One approach would be to attempt to connect to the first one, and then if that fails, attempt to connect to the second one ... until we've tried all of them. But the problem with this is that if the first IP address is unreachable (for example, because it's an IPv6 address and our network discards IPv6 packets), then we might end up waiting tens of seconds for the first connection attempt to timeout before we try the second address. Another approach would be to attempt to connect to all of the addresses at the same time, in parallel, and then use whichever connection succeeds first, abandoning the others. This would be fast, but create a lot of unnecessary load on the network and the remote server. 
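Editor's note: httpcore's trio backend, shown above, translates trio.TooSlowError and OSError into its own connect-level exceptions through a map_exceptions context manager. A self-contained sketch of that translation pattern, with hypothetical exception classes standing in for httpcore's:

import contextlib

import trio

class ConnectTimeout(Exception): ...
class ConnectError(Exception): ...

@contextlib.contextmanager
def map_exceptions(mapping):
    try:
        yield
    except Exception as exc:
        for from_exc, to_exc in mapping.items():
            if isinstance(exc, from_exc):
                raise to_exc(exc) from exc
        raise

async def connect_with_timeout(host, port, timeout=4):
    exc_map = {trio.TooSlowError: ConnectTimeout, OSError: ConnectError}
    with map_exceptions(exc_map):
        with trio.fail_after(timeout):
            return await trio.open_tcp_stream(host, port)

# trio.run(connect_with_timeout, "dns.google", 443)  # needs network access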
This function strikes a balance between these two extremes: it works its way through the available addresses one at a time, like the first approach; but, if ``happy_eyeballs_delay`` seconds have passed and it's still waiting for an attempt to succeed or fail, then it gets impatient and starts the next connection attempt in parallel. As soon as any one connection attempt succeeds, all the other attempts are cancelled. This avoids unnecessary load because most connections will succeed after just one or two attempts, but if one of the addresses is unreachable then it doesn't slow us down too much. This is known as a "happy eyeballs" algorithm, and our particular variant is modelled after how Chrome connects to webservers; see `RFC 6555 `__ for more details. Args: host (str or bytes): The host to connect to. Can be an IPv4 address, IPv6 address, or a hostname. port (int): The port to connect to. happy_eyeballs_delay (float): How many seconds to wait for each connection attempt to succeed or fail before getting impatient and starting another one in parallel. Set to `math.inf` if you want to limit to only one connection attempt at a time (like :func:`socket.create_connection`). Default: 0.25 (250 ms). local_address (None or str): The local IP address or hostname to use as the source for outgoing connections. If ``None``, we let the OS pick the source IP. This is useful in some exotic networking configurations where your host has multiple IP addresses, and you want to force the use of a specific one. Note that if you pass an IPv4 ``local_address``, then you won't be able to connect to IPv6 hosts, and vice-versa. If you want to take advantage of this to force the use of IPv4 or IPv6 without specifying an exact source address, you can use the IPv4 wildcard address ``local_address="0.0.0.0"``, or the IPv6 wildcard address ``local_address="::"``. Returns: SocketStream: a :class:`~trio.abc.Stream` connected to the given server. Raises: OSError: if the connection fails. See also: open_ssl_over_tcp_stream """ # To keep our public API surface smaller, rule out some cases that # getaddrinfo will accept in some circumstances, but that act weird or # have non-portable behavior or are just plain not useful. # No type check on host though b/c we want to allow bytes-likes. if host is None: raise ValueError("host cannot be None") if not isinstance(port, int): raise TypeError("port must be int, not {!r}".format(port)) if happy_eyeballs_delay is None: happy_eyeballs_delay = DEFAULT_DELAY > targets = await getaddrinfo(host, port, type=SOCK_STREAM) /usr/lib/python3.11/site-packages/trio/_highlevel_open_tcp_stream.py:259: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = b'dns.google', port = 443, family = 0, type = proto = 0, flags = 0 async def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Look up a numeric address given a name. Arguments and return values are identical to :func:`socket.getaddrinfo`, except that this version is async. Also, :func:`trio.socket.getaddrinfo` correctly uses IDNA 2008 to process non-ASCII domain names. (:func:`socket.getaddrinfo` uses IDNA 2003, which can give the wrong result in some cases and cause you to connect to a different host than the one you intended; see `bpo-17305 `__.) This function's behavior can be customized using :func:`set_custom_hostname_resolver`. """ # If host and port are numeric, then getaddrinfo doesn't block and we can # skip the whole thread thing, which seems worthwhile. 
So we try first # with the _NUMERIC_ONLY flags set, and then only spawn a thread if that # fails with EAI_NONAME: def numeric_only_failure(exc): return ( isinstance(exc, _stdlib_socket.gaierror) and exc.errno == _stdlib_socket.EAI_NONAME ) async with _try_sync(numeric_only_failure): return _stdlib_socket.getaddrinfo( host, port, family, type, proto, flags | _NUMERIC_ONLY ) # That failed; it's a real hostname. We better use a thread. # # Also, it might be a unicode hostname, in which case we want to do our # own encoding using the idna module, rather than letting Python do # it. (Python will use the old IDNA 2003 standard, and possibly get the # wrong answer - see bpo-17305). However, the idna module is picky, and # will refuse to process some valid hostname strings, like "::1". So if # it's already ascii, we pass it through; otherwise, we encode it to. if isinstance(host, str): try: host = host.encode("ascii") except UnicodeEncodeError: # UTS-46 defines various normalizations; in particular, by default # idna.encode will error out if the hostname has Capital Letters # in it; with uts46=True it will lowercase them instead. host = _idna.encode(host, uts46=True) hr = _resolver.get(None) if hr is not None: return await hr.getaddrinfo(host, port, family, type, proto, flags) else: > return await trio.to_thread.run_sync( _stdlib_socket.getaddrinfo, host, port, family, type, proto, flags, cancellable=True, ) /usr/lib/python3.11/site-packages/trio/_socket.py:183: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ sync_fn = , cancellable = True limiter = args = (b'dns.google', 443, 0, , 0, 0) name = 'trio.to_thread.run_sync-1' @enable_ki_protection async def to_thread_run_sync(sync_fn, *args, cancellable=False, limiter=None): """Convert a blocking operation into an async operation using a thread. These two lines are equivalent:: sync_fn(*args) await trio.to_thread.run_sync(sync_fn, *args) except that if ``sync_fn`` takes a long time, then the first line will block the Trio loop while it runs, while the second line allows other Trio tasks to continue working while ``sync_fn`` runs. This is accomplished by pushing the call to ``sync_fn(*args)`` off into a worker thread. From inside the worker thread, you can get back into Trio using the functions in `trio.from_thread`. Args: sync_fn: An arbitrary synchronous callable. *args: Positional arguments to pass to sync_fn. If you need keyword arguments, use :func:`functools.partial`. cancellable (bool): Whether to allow cancellation of this operation. See discussion below. limiter (None, or CapacityLimiter-like object): An object used to limit the number of simultaneous threads. Most commonly this will be a `~trio.CapacityLimiter`, but it could be anything providing compatible :meth:`~trio.CapacityLimiter.acquire_on_behalf_of` and :meth:`~trio.CapacityLimiter.release_on_behalf_of` methods. This function will call ``acquire_on_behalf_of`` before starting the thread, and ``release_on_behalf_of`` after the thread has finished. If None (the default), uses the default `~trio.CapacityLimiter`, as returned by :func:`current_default_thread_limiter`. **Cancellation handling**: Cancellation is a tricky issue here, because neither Python nor the operating systems it runs on provide any general mechanism for cancelling an arbitrary synchronous function running in a thread. This function will always check for cancellation on entry, before starting the thread. 
But once the thread is running, there are two ways it can handle being cancelled: * If ``cancellable=False``, the function ignores the cancellation and keeps going, just like if we had called ``sync_fn`` synchronously. This is the default behavior. * If ``cancellable=True``, then this function immediately raises `~trio.Cancelled`. In this case **the thread keeps running in background** – we just abandon it to do whatever it's going to do, and silently discard any return value or errors that it raises. Only use this if you know that the operation is safe and side-effect free. (For example: :func:`trio.socket.getaddrinfo` uses a thread with ``cancellable=True``, because it doesn't really affect anything if a stray hostname lookup keeps running in the background.) The ``limiter`` is only released after the thread has *actually* finished – which in the case of cancellation may be some time after this function has returned. If :func:`trio.run` finishes before the thread does, then the limiter release method will never be called at all. .. warning:: You should not use this function to call long-running CPU-bound functions! In addition to the usual GIL-related reasons why using threads for CPU-bound work is not very effective in Python, there is an additional problem: on CPython, `CPU-bound threads tend to "starve out" IO-bound threads `__, so using threads for CPU-bound work is likely to adversely affect the main thread running Trio. If you need to do this, you're better off using a worker process, or perhaps PyPy (which still has a GIL, but may do a better job of fairly allocating CPU time between threads). Returns: Whatever ``sync_fn(*args)`` returns. Raises: Exception: Whatever ``sync_fn(*args)`` raises. """ await trio.lowlevel.checkpoint_if_cancelled() cancellable = bool(cancellable) # raise early if cancellable.__bool__ raises if limiter is None: limiter = current_default_thread_limiter() # Holds a reference to the task that's blocked in this function waiting # for the result – or None if this function was cancelled and we should # discard the result. task_register = [trio.lowlevel.current_task()] name = f"trio.to_thread.run_sync-{next(_thread_counter)}" placeholder = ThreadPlaceholder(name) # This function gets scheduled into the Trio run loop to deliver the # thread's result. def report_back_in_trio_thread_fn(result): def do_release_then_return_result(): # release_on_behalf_of is an arbitrary user-defined method, so it # might raise an error. If it does, we want that error to # replace the regular return value, and if the regular return was # already an exception then we want them to chain. 
try: return result.unwrap() finally: limiter.release_on_behalf_of(placeholder) result = outcome.capture(do_release_then_return_result) if task_register[0] is not None: trio.lowlevel.reschedule(task_register[0], result) current_trio_token = trio.lowlevel.current_trio_token() def worker_fn(): current_async_library_cvar.set(None) TOKEN_LOCAL.token = current_trio_token try: ret = sync_fn(*args) if inspect.iscoroutine(ret): # Manually close coroutine to avoid RuntimeWarnings ret.close() raise TypeError( "Trio expected a sync function, but {!r} appears to be " "asynchronous".format(getattr(sync_fn, "__qualname__", sync_fn)) ) return ret finally: del TOKEN_LOCAL.token context = contextvars.copy_context() contextvars_aware_worker_fn = functools.partial(context.run, worker_fn) def deliver_worker_fn_result(result): try: current_trio_token.run_sync_soon(report_back_in_trio_thread_fn, result) except trio.RunFinishedError: # The entire run finished, so the task we're trying to contact is # certainly long gone -- it must have been cancelled and abandoned # us. pass await limiter.acquire_on_behalf_of(placeholder) try: start_thread_soon(contextvars_aware_worker_fn, deliver_worker_fn_result) except: limiter.release_on_behalf_of(placeholder) raise def abort(_): if cancellable: task_register[0] = None return trio.lowlevel.Abort.SUCCEEDED else: return trio.lowlevel.Abort.FAILED > return await trio.lowlevel.wait_task_rescheduled(abort) /usr/lib/python3.11/site-packages/trio/_threads.py:215: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ abort_func = .abort at 0x3fd9cae3e0> async def wait_task_rescheduled(abort_func): """Put the current task to sleep, with cancellation support. This is the lowest-level API for blocking in Trio. Every time a :class:`~trio.lowlevel.Task` blocks, it does so by calling this function (usually indirectly via some higher-level API). This is a tricky interface with no guard rails. If you can use :class:`ParkingLot` or the built-in I/O wait functions instead, then you should. Generally the way it works is that before calling this function, you make arrangements for "someone" to call :func:`reschedule` on the current task at some later point. Then you call :func:`wait_task_rescheduled`, passing in ``abort_func``, an "abort callback". (Terminology: in Trio, "aborting" is the process of attempting to interrupt a blocked task to deliver a cancellation.) There are two possibilities for what happens next: 1. "Someone" calls :func:`reschedule` on the current task, and :func:`wait_task_rescheduled` returns or raises whatever value or error was passed to :func:`reschedule`. 2. The call's context transitions to a cancelled state (e.g. due to a timeout expiring). When this happens, the ``abort_func`` is called. Its interface looks like:: def abort_func(raise_cancel): ... return trio.lowlevel.Abort.SUCCEEDED # or FAILED It should attempt to clean up any state associated with this call, and in particular, arrange that :func:`reschedule` will *not* be called later. If (and only if!) it is successful, then it should return :data:`Abort.SUCCEEDED`, in which case the task will automatically be rescheduled with an appropriate :exc:`~trio.Cancelled` error. Otherwise, it should return :data:`Abort.FAILED`. This means that the task can't be cancelled at this time, and still has to make sure that "someone" eventually calls :func:`reschedule`. At that point there are again two possibilities. 
You can simply ignore the cancellation altogether: wait for the operation to complete and then reschedule and continue as normal. (For example, this is what :func:`trio.to_thread.run_sync` does if cancellation is disabled.) The other possibility is that the ``abort_func`` does succeed in cancelling the operation, but for some reason isn't able to report that right away. (Example: on Windows, it's possible to request that an async ("overlapped") I/O operation be cancelled, but this request is *also* asynchronous – you don't find out until later whether the operation was actually cancelled or not.) To report a delayed cancellation, then you should reschedule the task yourself, and call the ``raise_cancel`` callback passed to ``abort_func`` to raise a :exc:`~trio.Cancelled` (or possibly :exc:`KeyboardInterrupt`) exception into this task. Either of the approaches sketched below can work:: # Option 1: # Catch the exception from raise_cancel and inject it into the task. # (This is what Trio does automatically for you if you return # Abort.SUCCEEDED.) trio.lowlevel.reschedule(task, outcome.capture(raise_cancel)) # Option 2: # wait to be woken by "someone", and then decide whether to raise # the error from inside the task. outer_raise_cancel = None def abort(inner_raise_cancel): nonlocal outer_raise_cancel outer_raise_cancel = inner_raise_cancel TRY_TO_CANCEL_OPERATION() return trio.lowlevel.Abort.FAILED await wait_task_rescheduled(abort) if OPERATION_WAS_SUCCESSFULLY_CANCELLED: # raises the error outer_raise_cancel() In any case it's guaranteed that we only call the ``abort_func`` at most once per call to :func:`wait_task_rescheduled`. Sometimes, it's useful to be able to share some mutable sleep-related data between the sleeping task, the abort function, and the waking task. You can use the sleeping task's :data:`~Task.custom_sleep_data` attribute to store this data, and Trio won't touch it, except to make sure that it gets cleared when the task is rescheduled. .. warning:: If your ``abort_func`` raises an error, or returns any value other than :data:`Abort.SUCCEEDED` or :data:`Abort.FAILED`, then Trio will crash violently. Be careful! Similarly, it is entirely possible to deadlock a Trio program by failing to reschedule a blocked task, or cause havoc by calling :func:`reschedule` too many times. Remember what we said up above about how you should use a higher-level API if at all possible? """ > return (await _async_yield(WaitTaskRescheduled(abort_func))).unwrap() /usr/lib/python3.11/site-packages/trio/_core/_traps.py:166: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def unwrap(self): self._set_unwrapped() # Tracebacks show the 'raise' line below out of context, so let's give # this variable a name that makes sense out of context. captured_error = self.error try: > raise captured_error /usr/lib/python3.11/site-packages/outcome/_impl.py:138: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def do_release_then_return_result(): # release_on_behalf_of is an arbitrary user-defined method, so it # might raise an error. If it does, we want that error to # replace the regular return value, and if the regular return was # already an exception then we want them to chain. 
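Editor's note: before parking the task, worker_fn in the frames above is wrapped with a copied contextvars context so that context variables set in the Trio task are visible inside the worker thread. A tiny standalone illustration of that copy_context/run pattern:

import contextvars
import functools
import threading

request_id = contextvars.ContextVar("request_id", default=None)

def worker():
    # Sees the value that was current when the context was copied.
    print("in worker thread:", request_id.get())

request_id.set("lookup-42")
ctx = contextvars.copy_context()
t = threading.Thread(target=functools.partial(ctx.run, worker))
t.start()
t.join()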
try: > return result.unwrap() /usr/lib/python3.11/site-packages/trio/_threads.py:161: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def unwrap(self): self._set_unwrapped() # Tracebacks show the 'raise' line below out of context, so let's give # this variable a name that makes sense out of context. captured_error = self.error try: > raise captured_error /usr/lib/python3.11/site-packages/outcome/_impl.py:138: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def worker_fn(): current_async_library_cvar.set(None) TOKEN_LOCAL.token = current_trio_token try: > ret = sync_fn(*args) /usr/lib/python3.11/site-packages/trio/_threads.py:175: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = b'dns.google', port = 443, family = 0, type = proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request( self, request: Request, ) -> Response: assert isinstance(request.stream, AsyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = await self._pool.handle_async_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:353: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." 
) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: response = await connection.handle_async_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. async with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() await self._attempt_to_acquire_connection(status) except BaseException as exc: await self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. 
async with self._pool_lock: self._requests.remove(status) raise exc try: > response = await connection.handle_async_request(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: stream = await self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import AsyncHTTP2Connection self._connection = AsyncHTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = AsyncHTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: > stream = await self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def _connect(self, request: Request) -> AsyncNetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": self._local_address, "timeout": timeout, } async with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = await self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: Optional[float] = None, local_address: Optional[str] = None, ) -> AsyncNetworkStream: await self._init_backend() > return await self._backend.connect_tcp( host, port, timeout=timeout, local_address=local_address ) /usr/lib/python3.11/site-packages/httpcore/backends/auto.py:29: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: timeout_or_inf = float("inf") if timeout is None else timeout exc_map = { trio.TooSlowError: ConnectTimeout, trio.BrokenResourceError: ConnectError, OSError: ConnectError, } # Trio supports 'local_address' from 0.16.1 onwards. 
# We only include the keyword argument if a local_address # argument has been passed. kwargs: dict = {} if local_address is None else {"local_address": local_address} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/trio.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = @unittest.skipIf(not dns.query._have_httpx, "httpx not available") def testDOHGetRequestHttp1(self): if self.backend.name() == "curio": self.skipTest("anyio dropped curio support") async def run(): saved_have_http2 = dns.query._have_http2 try: dns.query._have_http2 = False nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) r = await dns.asyncquery.https(q, nameserver_url, post=False, timeout=4) self.assertTrue(q.is_response(r)) finally: dns.query._have_http2 = saved_have_http2 > self.async_run(run) tests/test_async.py:486: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:481: in run r = await dns.asyncquery.https(q, nameserver_url, post=False, timeout=4) dns/asyncquery.py:554: in https response = await the_client.get( /usr/lib/python3.11/site-packages/httpx/_client.py:1751: in get return await self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:1527: in request return await self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:1614: in send response = await self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:1642: in _send_handling_auth response = await self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:1679: in _send_handling_redirects response = await self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1716: in _send_single_request response = await transport.handle_async_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:352: in handle_async_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): 
continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError ______________________ TrioAsyncTests.testDOHPostRequest _______________________ map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: timeout_or_inf = float("inf") if timeout is None else timeout exc_map = { trio.TooSlowError: ConnectTimeout, trio.BrokenResourceError: ConnectError, OSError: ConnectError, } # Trio supports 'local_address' from 0.16.1 onwards. # We only include the keyword argument if a local_address # argument has been passed. kwargs: dict = {} if local_address is None else {"local_address": local_address} with map_exceptions(exc_map): with trio.fail_after(timeout_or_inf): > stream: trio.abc.Stream = await trio.open_tcp_stream( host=host, port=port, **kwargs ) /usr/lib/python3.11/site-packages/httpcore/backends/trio.py:122: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'cloudflare-dns.com', port = 443 async def open_tcp_stream( host, port, *, happy_eyeballs_delay=DEFAULT_DELAY, local_address=None ): """Connect to the given host and port over TCP. If the given ``host`` has multiple IP addresses associated with it, then we have a problem: which one do we use? One approach would be to attempt to connect to the first one, and then if that fails, attempt to connect to the second one ... until we've tried all of them. But the problem with this is that if the first IP address is unreachable (for example, because it's an IPv6 address and our network discards IPv6 packets), then we might end up waiting tens of seconds for the first connection attempt to timeout before we try the second address. Another approach would be to attempt to connect to all of the addresses at the same time, in parallel, and then use whichever connection succeeds first, abandoning the others. This would be fast, but create a lot of unnecessary load on the network and the remote server. This function strikes a balance between these two extremes: it works its way through the available addresses one at a time, like the first approach; but, if ``happy_eyeballs_delay`` seconds have passed and it's still waiting for an attempt to succeed or fail, then it gets impatient and starts the next connection attempt in parallel. As soon as any one connection attempt succeeds, all the other attempts are cancelled. This avoids unnecessary load because most connections will succeed after just one or two attempts, but if one of the addresses is unreachable then it doesn't slow us down too much. 
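Editor's note: all three failing tests reduce to the same root cause: the DNS-over-HTTPS query cannot even resolve the resolver's hostname, so httpx surfaces a ConnectError. A minimal sketch of the query the tests perform, with that error handled explicitly; the resolver URL here is only an example, while the tests pick one at random from KNOWN_ANYCAST_DOH_RESOLVER_URLS:

import trio
import httpx

import dns.asyncquery
import dns.message
import dns.rdatatype

async def doh_probe(url="https://dns.google/dns-query"):
    q = dns.message.make_query("example.com.", dns.rdatatype.A)
    try:
        r = await dns.asyncquery.https(q, url, post=False, timeout=4)
        return q.is_response(r)
    except httpx.ConnectError as exc:
        # In a network-less build chroot this is
        # "[Errno -3] Temporary failure in name resolution".
        print("DoH probe failed:", exc)
        return False

# trio.run(doh_probe)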
This is known as a "happy eyeballs" algorithm, and our particular variant is modelled after how Chrome connects to webservers; see `RFC 6555 `__ for more details. Args: host (str or bytes): The host to connect to. Can be an IPv4 address, IPv6 address, or a hostname. port (int): The port to connect to. happy_eyeballs_delay (float): How many seconds to wait for each connection attempt to succeed or fail before getting impatient and starting another one in parallel. Set to `math.inf` if you want to limit to only one connection attempt at a time (like :func:`socket.create_connection`). Default: 0.25 (250 ms). local_address (None or str): The local IP address or hostname to use as the source for outgoing connections. If ``None``, we let the OS pick the source IP. This is useful in some exotic networking configurations where your host has multiple IP addresses, and you want to force the use of a specific one. Note that if you pass an IPv4 ``local_address``, then you won't be able to connect to IPv6 hosts, and vice-versa. If you want to take advantage of this to force the use of IPv4 or IPv6 without specifying an exact source address, you can use the IPv4 wildcard address ``local_address="0.0.0.0"``, or the IPv6 wildcard address ``local_address="::"``. Returns: SocketStream: a :class:`~trio.abc.Stream` connected to the given server. Raises: OSError: if the connection fails. See also: open_ssl_over_tcp_stream """ # To keep our public API surface smaller, rule out some cases that # getaddrinfo will accept in some circumstances, but that act weird or # have non-portable behavior or are just plain not useful. # No type check on host though b/c we want to allow bytes-likes. if host is None: raise ValueError("host cannot be None") if not isinstance(port, int): raise TypeError("port must be int, not {!r}".format(port)) if happy_eyeballs_delay is None: happy_eyeballs_delay = DEFAULT_DELAY > targets = await getaddrinfo(host, port, type=SOCK_STREAM) /usr/lib/python3.11/site-packages/trio/_highlevel_open_tcp_stream.py:259: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = b'cloudflare-dns.com', port = 443, family = 0 type = , proto = 0, flags = 0 async def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Look up a numeric address given a name. Arguments and return values are identical to :func:`socket.getaddrinfo`, except that this version is async. Also, :func:`trio.socket.getaddrinfo` correctly uses IDNA 2008 to process non-ASCII domain names. (:func:`socket.getaddrinfo` uses IDNA 2003, which can give the wrong result in some cases and cause you to connect to a different host than the one you intended; see `bpo-17305 `__.) This function's behavior can be customized using :func:`set_custom_hostname_resolver`. """ # If host and port are numeric, then getaddrinfo doesn't block and we can # skip the whole thread thing, which seems worthwhile. So we try first # with the _NUMERIC_ONLY flags set, and then only spawn a thread if that # fails with EAI_NONAME: def numeric_only_failure(exc): return ( isinstance(exc, _stdlib_socket.gaierror) and exc.errno == _stdlib_socket.EAI_NONAME ) async with _try_sync(numeric_only_failure): return _stdlib_socket.getaddrinfo( host, port, family, type, proto, flags | _NUMERIC_ONLY ) # That failed; it's a real hostname. We better use a thread. # # Also, it might be a unicode hostname, in which case we want to do our # own encoding using the idna module, rather than letting Python do # it. 
(Python will use the old IDNA 2003 standard, and possibly get the # wrong answer - see bpo-17305). However, the idna module is picky, and # will refuse to process some valid hostname strings, like "::1". So if # it's already ascii, we pass it through; otherwise, we encode it to. if isinstance(host, str): try: host = host.encode("ascii") except UnicodeEncodeError: # UTS-46 defines various normalizations; in particular, by default # idna.encode will error out if the hostname has Capital Letters # in it; with uts46=True it will lowercase them instead. host = _idna.encode(host, uts46=True) hr = _resolver.get(None) if hr is not None: return await hr.getaddrinfo(host, port, family, type, proto, flags) else: > return await trio.to_thread.run_sync( _stdlib_socket.getaddrinfo, host, port, family, type, proto, flags, cancellable=True, ) /usr/lib/python3.11/site-packages/trio/_socket.py:183: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ sync_fn = , cancellable = True limiter = args = (b'cloudflare-dns.com', 443, 0, , 0, 0) name = 'trio.to_thread.run_sync-2' @enable_ki_protection async def to_thread_run_sync(sync_fn, *args, cancellable=False, limiter=None): """Convert a blocking operation into an async operation using a thread. These two lines are equivalent:: sync_fn(*args) await trio.to_thread.run_sync(sync_fn, *args) except that if ``sync_fn`` takes a long time, then the first line will block the Trio loop while it runs, while the second line allows other Trio tasks to continue working while ``sync_fn`` runs. This is accomplished by pushing the call to ``sync_fn(*args)`` off into a worker thread. From inside the worker thread, you can get back into Trio using the functions in `trio.from_thread`. Args: sync_fn: An arbitrary synchronous callable. *args: Positional arguments to pass to sync_fn. If you need keyword arguments, use :func:`functools.partial`. cancellable (bool): Whether to allow cancellation of this operation. See discussion below. limiter (None, or CapacityLimiter-like object): An object used to limit the number of simultaneous threads. Most commonly this will be a `~trio.CapacityLimiter`, but it could be anything providing compatible :meth:`~trio.CapacityLimiter.acquire_on_behalf_of` and :meth:`~trio.CapacityLimiter.release_on_behalf_of` methods. This function will call ``acquire_on_behalf_of`` before starting the thread, and ``release_on_behalf_of`` after the thread has finished. If None (the default), uses the default `~trio.CapacityLimiter`, as returned by :func:`current_default_thread_limiter`. **Cancellation handling**: Cancellation is a tricky issue here, because neither Python nor the operating systems it runs on provide any general mechanism for cancelling an arbitrary synchronous function running in a thread. This function will always check for cancellation on entry, before starting the thread. But once the thread is running, there are two ways it can handle being cancelled: * If ``cancellable=False``, the function ignores the cancellation and keeps going, just like if we had called ``sync_fn`` synchronously. This is the default behavior. * If ``cancellable=True``, then this function immediately raises `~trio.Cancelled`. In this case **the thread keeps running in background** – we just abandon it to do whatever it's going to do, and silently discard any return value or errors that it raises. Only use this if you know that the operation is safe and side-effect free. 
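The UTS-46 case mapping mentioned in the getaddrinfo comments above, shown standalone with the third-party idna module (the same module trio imports as _idna); without uts46=True the encoder rejects upper-case letters, with it they are lowercased first:

    import idna

    # Plain encode() refuses upper-case code points; uts46=True applies the
    # UTS-46 mapping (case folding and normalisation) before encoding.
    try:
        idna.encode("BÜCHER.example")
    except idna.IDNAError as exc:
        print("strict encode failed:", exc)

    print(idna.encode("BÜCHER.example", uts46=True))  # b'xn--bcher-kva.example'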
(For example: :func:`trio.socket.getaddrinfo` uses a thread with ``cancellable=True``, because it doesn't really affect anything if a stray hostname lookup keeps running in the background.) The ``limiter`` is only released after the thread has *actually* finished – which in the case of cancellation may be some time after this function has returned. If :func:`trio.run` finishes before the thread does, then the limiter release method will never be called at all. .. warning:: You should not use this function to call long-running CPU-bound functions! In addition to the usual GIL-related reasons why using threads for CPU-bound work is not very effective in Python, there is an additional problem: on CPython, `CPU-bound threads tend to "starve out" IO-bound threads `__, so using threads for CPU-bound work is likely to adversely affect the main thread running Trio. If you need to do this, you're better off using a worker process, or perhaps PyPy (which still has a GIL, but may do a better job of fairly allocating CPU time between threads). Returns: Whatever ``sync_fn(*args)`` returns. Raises: Exception: Whatever ``sync_fn(*args)`` raises. """ await trio.lowlevel.checkpoint_if_cancelled() cancellable = bool(cancellable) # raise early if cancellable.__bool__ raises if limiter is None: limiter = current_default_thread_limiter() # Holds a reference to the task that's blocked in this function waiting # for the result – or None if this function was cancelled and we should # discard the result. task_register = [trio.lowlevel.current_task()] name = f"trio.to_thread.run_sync-{next(_thread_counter)}" placeholder = ThreadPlaceholder(name) # This function gets scheduled into the Trio run loop to deliver the # thread's result. def report_back_in_trio_thread_fn(result): def do_release_then_return_result(): # release_on_behalf_of is an arbitrary user-defined method, so it # might raise an error. If it does, we want that error to # replace the regular return value, and if the regular return was # already an exception then we want them to chain. try: return result.unwrap() finally: limiter.release_on_behalf_of(placeholder) result = outcome.capture(do_release_then_return_result) if task_register[0] is not None: trio.lowlevel.reschedule(task_register[0], result) current_trio_token = trio.lowlevel.current_trio_token() def worker_fn(): current_async_library_cvar.set(None) TOKEN_LOCAL.token = current_trio_token try: ret = sync_fn(*args) if inspect.iscoroutine(ret): # Manually close coroutine to avoid RuntimeWarnings ret.close() raise TypeError( "Trio expected a sync function, but {!r} appears to be " "asynchronous".format(getattr(sync_fn, "__qualname__", sync_fn)) ) return ret finally: del TOKEN_LOCAL.token context = contextvars.copy_context() contextvars_aware_worker_fn = functools.partial(context.run, worker_fn) def deliver_worker_fn_result(result): try: current_trio_token.run_sync_soon(report_back_in_trio_thread_fn, result) except trio.RunFinishedError: # The entire run finished, so the task we're trying to contact is # certainly long gone -- it must have been cancelled and abandoned # us. 
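A small sketch of the cancellable thread offload described in the to_thread_run_sync docstring above, assuming the trio version captured in this log (where the keyword is still cancellable) and a name that resolves; the blocking lookup is simply abandoned in its thread if the timeout fires:

    import socket
    import trio

    async def main():
        with trio.move_on_after(1) as scope:
            # Blocking stdlib call pushed to a worker thread; with
            # cancellable=True the thread is abandoned on cancellation.
            infos = await trio.to_thread.run_sync(
                socket.getaddrinfo, "dns.google", 853, cancellable=True
            )
            print(infos[0][4])
        if scope.cancelled_caught:
            print("lookup abandoned after timeout")

    trio.run(main)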
pass await limiter.acquire_on_behalf_of(placeholder) try: start_thread_soon(contextvars_aware_worker_fn, deliver_worker_fn_result) except: limiter.release_on_behalf_of(placeholder) raise def abort(_): if cancellable: task_register[0] = None return trio.lowlevel.Abort.SUCCEEDED else: return trio.lowlevel.Abort.FAILED > return await trio.lowlevel.wait_task_rescheduled(abort) /usr/lib/python3.11/site-packages/trio/_threads.py:215: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ abort_func = .abort at 0x3fd9caf740> async def wait_task_rescheduled(abort_func): """Put the current task to sleep, with cancellation support. This is the lowest-level API for blocking in Trio. Every time a :class:`~trio.lowlevel.Task` blocks, it does so by calling this function (usually indirectly via some higher-level API). This is a tricky interface with no guard rails. If you can use :class:`ParkingLot` or the built-in I/O wait functions instead, then you should. Generally the way it works is that before calling this function, you make arrangements for "someone" to call :func:`reschedule` on the current task at some later point. Then you call :func:`wait_task_rescheduled`, passing in ``abort_func``, an "abort callback". (Terminology: in Trio, "aborting" is the process of attempting to interrupt a blocked task to deliver a cancellation.) There are two possibilities for what happens next: 1. "Someone" calls :func:`reschedule` on the current task, and :func:`wait_task_rescheduled` returns or raises whatever value or error was passed to :func:`reschedule`. 2. The call's context transitions to a cancelled state (e.g. due to a timeout expiring). When this happens, the ``abort_func`` is called. Its interface looks like:: def abort_func(raise_cancel): ... return trio.lowlevel.Abort.SUCCEEDED # or FAILED It should attempt to clean up any state associated with this call, and in particular, arrange that :func:`reschedule` will *not* be called later. If (and only if!) it is successful, then it should return :data:`Abort.SUCCEEDED`, in which case the task will automatically be rescheduled with an appropriate :exc:`~trio.Cancelled` error. Otherwise, it should return :data:`Abort.FAILED`. This means that the task can't be cancelled at this time, and still has to make sure that "someone" eventually calls :func:`reschedule`. At that point there are again two possibilities. You can simply ignore the cancellation altogether: wait for the operation to complete and then reschedule and continue as normal. (For example, this is what :func:`trio.to_thread.run_sync` does if cancellation is disabled.) The other possibility is that the ``abort_func`` does succeed in cancelling the operation, but for some reason isn't able to report that right away. (Example: on Windows, it's possible to request that an async ("overlapped") I/O operation be cancelled, but this request is *also* asynchronous – you don't find out until later whether the operation was actually cancelled or not.) To report a delayed cancellation, then you should reschedule the task yourself, and call the ``raise_cancel`` callback passed to ``abort_func`` to raise a :exc:`~trio.Cancelled` (or possibly :exc:`KeyboardInterrupt`) exception into this task. Either of the approaches sketched below can work:: # Option 1: # Catch the exception from raise_cancel and inject it into the task. # (This is what Trio does automatically for you if you return # Abort.SUCCEEDED.) 
trio.lowlevel.reschedule(task, outcome.capture(raise_cancel)) # Option 2: # wait to be woken by "someone", and then decide whether to raise # the error from inside the task. outer_raise_cancel = None def abort(inner_raise_cancel): nonlocal outer_raise_cancel outer_raise_cancel = inner_raise_cancel TRY_TO_CANCEL_OPERATION() return trio.lowlevel.Abort.FAILED await wait_task_rescheduled(abort) if OPERATION_WAS_SUCCESSFULLY_CANCELLED: # raises the error outer_raise_cancel() In any case it's guaranteed that we only call the ``abort_func`` at most once per call to :func:`wait_task_rescheduled`. Sometimes, it's useful to be able to share some mutable sleep-related data between the sleeping task, the abort function, and the waking task. You can use the sleeping task's :data:`~Task.custom_sleep_data` attribute to store this data, and Trio won't touch it, except to make sure that it gets cleared when the task is rescheduled. .. warning:: If your ``abort_func`` raises an error, or returns any value other than :data:`Abort.SUCCEEDED` or :data:`Abort.FAILED`, then Trio will crash violently. Be careful! Similarly, it is entirely possible to deadlock a Trio program by failing to reschedule a blocked task, or cause havoc by calling :func:`reschedule` too many times. Remember what we said up above about how you should use a higher-level API if at all possible? """ > return (await _async_yield(WaitTaskRescheduled(abort_func))).unwrap() /usr/lib/python3.11/site-packages/trio/_core/_traps.py:166: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def unwrap(self): self._set_unwrapped() # Tracebacks show the 'raise' line below out of context, so let's give # this variable a name that makes sense out of context. captured_error = self.error try: > raise captured_error /usr/lib/python3.11/site-packages/outcome/_impl.py:138: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def do_release_then_return_result(): # release_on_behalf_of is an arbitrary user-defined method, so it # might raise an error. If it does, we want that error to # replace the regular return value, and if the regular return was # already an exception then we want them to chain. try: > return result.unwrap() /usr/lib/python3.11/site-packages/trio/_threads.py:161: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def unwrap(self): self._set_unwrapped() # Tracebacks show the 'raise' line below out of context, so let's give # this variable a name that makes sense out of context. captured_error = self.error try: > raise captured_error /usr/lib/python3.11/site-packages/outcome/_impl.py:138: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def worker_fn(): current_async_library_cvar.set(None) TOKEN_LOCAL.token = current_trio_token try: > ret = sync_fn(*args) /usr/lib/python3.11/site-packages/trio/_threads.py:175: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = b'cloudflare-dns.com', port = 443, family = 0 type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. 
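The wait_task_rescheduled docstring above closes by recommending higher-level APIs whenever possible; as an illustration of that advice (not of the abort protocol itself), a cancel scope arranges the reschedule-with-Cancelled dance automatically:

    import trio

    async def main():
        # The cancel scope delivers trio.Cancelled to whatever the task is
        # blocked on; no abort callback needs to be written by hand.
        with trio.move_on_after(0.1) as scope:
            await trio.sleep(10)
        print("cancelled:", scope.cancelled_caught)  # True

    trio.run(main)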
By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request( self, request: Request, ) -> Response: assert isinstance(request.stream, AsyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = await self._pool.handle_async_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:353: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: response = await connection.handle_async_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. async with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. 
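The exception-translation pattern used by httpcore's map_exceptions and httpx's map_httpcore_exceptions in the frames above and below, as a generic self-contained sketch; ConnectError here is a local stand-in, not the httpx or httpcore class:

    import contextlib

    class ConnectError(Exception):
        """Stand-in for the caller-facing error type."""

    @contextlib.contextmanager
    def map_exceptions(mapping):
        # Translate low-level exceptions into the mapped type, chaining the
        # original so the traceback keeps both layers.
        try:
            yield
        except Exception as exc:
            for from_exc, to_exc in mapping.items():
                if isinstance(exc, from_exc):
                    raise to_exc(exc) from exc
            raise

    try:
        with map_exceptions({OSError: ConnectError}):
            raise OSError("[Errno -3] Temporary failure in name resolution")
    except ConnectError as exc:
        print(type(exc).__name__, exc)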
status.unset_connection() await self._attempt_to_acquire_connection(status) except BaseException as exc: await self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = async def handle_async_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) async with self._pool_lock: self._requests.append(status) await self._close_expired_connections() await self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = await status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. async with self._pool_lock: self._requests.remove(status) raise exc try: > response = await connection.handle_async_request(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: stream = await self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import AsyncHTTP2Connection self._connection = AsyncHTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = AsyncHTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def handle_async_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) async with self._request_lock: if self._connection is None: try: > stream = await self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = async def _connect(self, request: Request) -> AsyncNetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": 
self._local_address, "timeout": timeout, } async with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = await self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_async/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: Optional[float] = None, local_address: Optional[str] = None, ) -> AsyncNetworkStream: await self._init_backend() > return await self._backend.connect_tcp( host, port, timeout=timeout, local_address=local_address ) /usr/lib/python3.11/site-packages/httpcore/backends/auto.py:29: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None async def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> AsyncNetworkStream: timeout_or_inf = float("inf") if timeout is None else timeout exc_map = { trio.TooSlowError: ConnectTimeout, trio.BrokenResourceError: ConnectError, OSError: ConnectError, } # Trio supports 'local_address' from 0.16.1 onwards. # We only include the keyword argument if a local_address # argument has been passed. kwargs: dict = {} if local_address is None else {"local_address": local_address} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/trio.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = @unittest.skipIf(not dns.query._have_httpx, "httpx not available") def testDOHPostRequest(self): if self.backend.name() == "curio": self.skipTest("anyio dropped curio support") async def run(): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) r = await dns.asyncquery.https(q, nameserver_url, post=True, timeout=4) self.assertTrue(q.is_response(r)) > self.async_run(run) tests/test_async.py:499: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:496: in run r = await dns.asyncquery.https(q, nameserver_url, post=True, timeout=4) dns/asyncquery.py:548: in https response = await the_client.post( /usr/lib/python3.11/site-packages/httpx/_client.py:1842: in post return await self.request( 
/usr/lib/python3.11/site-packages/httpx/_client.py:1527: in request return await self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:1614: in send response = await self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:1642: in _send_handling_auth response = await self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:1679: in _send_handling_redirects response = await self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1716: in _send_single_request response = await transport.handle_async_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:352: in handle_async_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError _________________________ TrioAsyncTests.testQueryTLS __________________________ self = @unittest.skipIf(not _ssl_available, "SSL not available") def testQueryTLS(self): for address in query_addresses: qname = dns.name.from_text("dns.google.") async def run(): q = dns.message.make_query(qname, dns.rdatatype.A) return await dns.asyncquery.tls(q, address, timeout=2) > response = self.async_run(run) tests/test_async.py:349: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:347: in run return await dns.asyncquery.tls(q, address, timeout=2) dns/asyncquery.py:466: in tls async with cm as s: dns/_asyncbackend.py:42: in __aexit__ await self.close() dns/_trio_backend.py:71: in close await self.stream.aclose() /usr/lib/python3.11/site-packages/trio/_ssl.py:775: in aclose await self._handshook.ensure(checkpoint=False) /usr/lib/python3.11/site-packages/trio/_ssl.py:221: in ensure await self._afn(*self._args) /usr/lib/python3.11/site-packages/trio/_ssl.py:610: in _do_handshake await self._retry(self._ssl_object.do_handshake, is_handshake=True) /usr/lib/python3.11/site-packages/trio/_ssl.py:574: in _retry await self.transport_stream.send_all(to_send) /usr/lib/python3.11/site-packages/trio/_highlevel_socket.py:100: in send_all with _translate_socket_errors_to_stream_errors(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextmanager def _translate_socket_errors_to_stream_errors(): try: yield except OSError as exc: if exc.errno in _closed_stream_errnos: > raise trio.ClosedResourceError("this socket was already closed") from None E trio.ClosedResourceError: this socket was already closed 
/usr/lib/python3.11/site-packages/trio/_highlevel_socket.py:31: ClosedResourceError ____________________ TrioAsyncTests.testQueryTLSWithSocket _____________________ self = @unittest.skipIf(not _ssl_available, "SSL not available") def testQueryTLSWithSocket(self): for address in query_addresses: qname = dns.name.from_text("dns.google.") async def run(): ssl_context = ssl.create_default_context() ssl_context.check_hostname = False async with await self.backend.make_socket( dns.inet.af_for_address(address), socket.SOCK_STREAM, 0, None, (address, 853), 2, ssl_context, None, ) as s: # for basic coverage await s.getsockname() q = dns.message.make_query(qname, dns.rdatatype.A) return await dns.asyncquery.tls(q, "8.8.8.8", sock=s, timeout=2) > response = self.async_run(run) tests/test_async.py:381: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:366: in run async with await self.backend.make_socket( dns/_asyncbackend.py:42: in __aexit__ await self.close() dns/_trio_backend.py:71: in close await self.stream.aclose() /usr/lib/python3.11/site-packages/trio/_ssl.py:775: in aclose await self._handshook.ensure(checkpoint=False) /usr/lib/python3.11/site-packages/trio/_ssl.py:221: in ensure await self._afn(*self._args) /usr/lib/python3.11/site-packages/trio/_ssl.py:610: in _do_handshake await self._retry(self._ssl_object.do_handshake, is_handshake=True) /usr/lib/python3.11/site-packages/trio/_ssl.py:574: in _retry await self.transport_stream.send_all(to_send) /usr/lib/python3.11/site-packages/trio/_highlevel_socket.py:100: in send_all with _translate_socket_errors_to_stream_errors(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextmanager def _translate_socket_errors_to_stream_errors(): try: yield except OSError as exc: if exc.errno in _closed_stream_errnos: > raise trio.ClosedResourceError("this socket was already closed") from None E trio.ClosedResourceError: this socket was already closed /usr/lib/python3.11/site-packages/trio/_highlevel_socket.py:31: ClosedResourceError __________________________ TrioAsyncTests.testResolve __________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testResolve(self): async def run(): answer = await dns.asyncresolver.resolve("dns.google.", "A") return set([rdata.address for rdata in answer]) > seen = self.async_run(run) tests/test_async.py:178: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:175: in run answer = await dns.asyncresolver.resolve("dns.google.", "A") dns/asyncresolver.py:223: in resolve return await get_default_resolver().resolve( dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ______________________ TrioAsyncTests.testResolveAddress _______________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
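The read_resolv_conf docstring above notes that *f* may be an open file rather than a filename; a minimal sketch of configuring a resolver without /etc/resolv.conf (exactly the file missing in this chroot), using only the dnspython API shown in these frames:

    import io
    import dns.resolver

    res = dns.resolver.Resolver(configure=False)
    # Feed resolv.conf-style text directly instead of opening /etc/resolv.conf.
    res.read_resolv_conf(io.StringIO("nameserver 8.8.8.8\nsearch example.com\n"))
    print(res.nameservers)  # ['8.8.8.8']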
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testResolveAddress(self): async def run(): return await dns.asyncresolver.resolve_address("8.8.8.8") > answer = self.async_run(run) tests/test_async.py:186: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:184: in run return await dns.asyncresolver.resolve_address("8.8.8.8") dns/asyncresolver.py:246: in resolve_address return await get_default_resolver().resolve_address(ipaddr, *args, **kwargs) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. 
> raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ________________________ TrioAsyncTests.testResolverDOH ________________________ self = @unittest.skipIf(not dns.query._have_httpx, "httpx not available") def testResolverDOH(self): if self.backend.name() == "curio": self.skipTest("anyio dropped curio support") async def run(): res = dns.asyncresolver.Resolver(configure=False) res.nameservers = ["https://dns.google/dns-query"] answer = await res.resolve("dns.google", "A", backend=self.backend) seen = set([rdata.address for rdata in answer]) self.assertTrue("8.8.8.8" in seen) self.assertTrue("8.8.4.4" in seen) > self.async_run(run) tests/test_async.py:514: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:509: in run answer = await res.resolve("dns.google", "A", backend=self.backend) dns/asyncresolver.py:89: in resolve timeout = self._compute_timeout(start, lifetime, resolution.errors) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = start = 1681629393.809039, lifetime = 5.0 errors = [('https://dns.google/dns-query', False, 53, ConnectError('[Errno -3] Temporary failure in name resolution'), None), (...e), ('https://dns.google/dns-query', False, 53, ConnectError('[Errno -3] Temporary failure in name resolution'), None)] def _compute_timeout( self, start: float, lifetime: Optional[float] = None, errors: Optional[List[ErrorTuple]] = None, ) -> float: lifetime = self.lifetime if lifetime is None else lifetime now = time.time() duration = now - start if errors is None: errors = [] if duration < 0: if duration < -1: # Time going backwards is bad. Just give up. raise LifetimeTimeout(timeout=duration, errors=errors) else: # Time went backwards, but only a little. This can # happen, e.g. under vmware with older linux kernels. # Pretend it didn't happen. duration = 0 if duration >= lifetime: > raise LifetimeTimeout(timeout=duration, errors=errors) E dns.resolver.LifetimeTimeout: The resolution lifetime expired after 5.697 seconds: Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution dns/resolver.py:988: LifetimeTimeout _______________________ TrioAsyncTests.testZoneForName1 ________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
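The testResolverDOH failure above already shows the pattern for running the async resolver without system configuration (Resolver(configure=False) plus explicit nameservers); the same idea with plain UDP nameservers, as a sketch that assumes outbound DNS is reachable (it is not in this chroot). The lifetime attribute is the budget whose exhaustion raises the LifetimeTimeout seen above:

    import trio
    import dns.asyncresolver

    async def main():
        res = dns.asyncresolver.Resolver(configure=False)
        res.nameservers = ["8.8.8.8", "8.8.4.4"]
        res.lifetime = 5.0  # total budget; exceeding it raises LifetimeTimeout
        answer = await res.resolve("dns.google", "A")
        print(sorted(rdata.address for rdata in answer))

    trio.run(main)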
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName1(self): async def run(): name = dns.name.from_text("www.dnspython.org.") return await dns.asyncresolver.zone_for_name(name) ezname = dns.name.from_text("dnspython.org.") > zname = self.async_run(run) tests/test_async.py:223: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:220: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _______________________ TrioAsyncTests.testZoneForName2 ________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName2(self): async def run(): name = dns.name.from_text("a.b.www.dnspython.org.") return await dns.asyncresolver.zone_for_name(name) ezname = dns.name.from_text("dnspython.org.") > zname = self.async_run(run) tests/test_async.py:232: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:229: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _______________________ TrioAsyncTests.testZoneForName3 ________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName3(self): async def run(): name = dns.name.from_text("dnspython.org.") return await dns.asyncresolver.zone_for_name(name) ezname = dns.name.from_text("dnspython.org.") > zname = self.async_run(run) tests/test_async.py:241: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:595: in async_run return trio.run(afunc) tests/test_async.py:238: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _______________________ TrioAsyncTests.testZoneForName4 ________________________ def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName4(self): def bad(): name = dns.name.from_text("dnspython.org", None) async def run(): return await dns.asyncresolver.zone_for_name(name) self.async_run(run) > self.assertRaises(dns.resolver.NotAbsolute, bad) tests/test_async.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:251: in bad self.async_run(run) tests/test_async.py:595: in async_run return trio.run(afunc) /usr/lib/python3.11/site-packages/trio/_core/_run.py:2010: in run raise runner.main_task_outcome.error tests/test_async.py:249: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ____________________ CurioAsyncTests.testCanonicalNameCNAME ____________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameCNAME(self): name = dns.name.from_text("www.dnspython.org") cname = dns.name.from_text("dmfrjf4ips8xa.cloudfront.net") async def run(): return await dns.asyncresolver.canonical_name(name) > self.assertEqual(self.async_run(run), cname) tests/test_async.py:205: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:203: in run return await dns.asyncresolver.canonical_name(name) dns/asyncresolver.py:256: in canonical_name return await get_default_resolver().canonical_name(name) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration __________________ CurioAsyncTests.testCanonicalNameDangling ___________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = @unittest.skipIf(_systemd_resolved_present, "systemd-resolved in use") def testCanonicalNameDangling(self): name = dns.name.from_text("dangling-cname.dnspython.org") cname = dns.name.from_text("dangling-target.dnspython.org") async def run(): return await dns.asyncresolver.canonical_name(name) > self.assertEqual(self.async_run(run), cname) tests/test_async.py:215: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:213: in run return await dns.asyncresolver.canonical_name(name) dns/asyncresolver.py:256: in canonical_name return await get_default_resolver().canonical_name(name) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ___________________ CurioAsyncTests.testCanonicalNameNoCNAME ___________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameNoCNAME(self): cname = dns.name.from_text("www.google.com") async def run(): return await dns.asyncresolver.canonical_name("www.google.com") > self.assertEqual(self.async_run(run), cname) tests/test_async.py:196: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:194: in run return await dns.asyncresolver.canonical_name("www.google.com") dns/asyncresolver.py:256: in canonical_name return await get_default_resolver().canonical_name(name) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. 
> raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _________________________ CurioAsyncTests.testQueryTLS _________________________ self = @unittest.skipIf(not _ssl_available, "SSL not available") def testQueryTLS(self): for address in query_addresses: qname = dns.name.from_text("dns.google.") async def run(): q = dns.message.make_query(qname, dns.rdatatype.A) return await dns.asyncquery.tls(q, address, timeout=2) > response = self.async_run(run) tests/test_async.py:349: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:347: in run return await dns.asyncquery.tls(q, address, timeout=2) dns/asyncquery.py:456: in tls cm = await backend.make_socket( _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = af = , socktype = proto = 0, source = None, destination = ('8.8.8.8', 853), timeout = 2 ssl_context = , server_hostname = None async def make_socket( self, af, socktype, proto=0, source=None, destination=None, timeout=None, ssl_context=None, server_hostname=None, ): if socktype == socket.SOCK_DGRAM: s = curio.socket.socket(af, socktype, proto) try: if source: s.bind(_lltuple(source, af)) except Exception: # pragma: no cover await s.close() raise return DatagramSocket(s) elif socktype == socket.SOCK_STREAM: if source: source_addr = _lltuple(source, af) else: source_addr = None async with _maybe_timeout(timeout): s = await curio.open_connection( destination[0], destination[1], ssl=ssl_context, source_addr=source_addr, server_hostname=server_hostname, ) > return StreamSocket(s) E UnboundLocalError: cannot access local variable 's' where it is not associated with a value dns/_curio_backend.py:116: UnboundLocalError ____________________ CurioAsyncTests.testQueryTLSWithSocket ____________________ self = @unittest.skipIf(not _ssl_available, "SSL not available") def testQueryTLSWithSocket(self): for address in query_addresses: qname = dns.name.from_text("dns.google.") async def run(): ssl_context = ssl.create_default_context() ssl_context.check_hostname = False async with await self.backend.make_socket( dns.inet.af_for_address(address), socket.SOCK_STREAM, 0, None, (address, 853), 2, ssl_context, None, ) as s: # for basic coverage await s.getsockname() q = dns.message.make_query(qname, dns.rdatatype.A) return await dns.asyncquery.tls(q, "8.8.8.8", sock=s, timeout=2) > response = self.async_run(run) tests/test_async.py:381: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:366: in run async with await 
self.backend.make_socket( _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = af = , socktype = proto = 0, source = None, destination = ('8.8.8.8', 853), timeout = 2 ssl_context = , server_hostname = None async def make_socket( self, af, socktype, proto=0, source=None, destination=None, timeout=None, ssl_context=None, server_hostname=None, ): if socktype == socket.SOCK_DGRAM: s = curio.socket.socket(af, socktype, proto) try: if source: s.bind(_lltuple(source, af)) except Exception: # pragma: no cover await s.close() raise return DatagramSocket(s) elif socktype == socket.SOCK_STREAM: if source: source_addr = _lltuple(source, af) else: source_addr = None async with _maybe_timeout(timeout): s = await curio.open_connection( destination[0], destination[1], ssl=ssl_context, source_addr=source_addr, server_hostname=server_hostname, ) > return StreamSocket(s) E UnboundLocalError: cannot access local variable 's' where it is not associated with a value dns/_curio_backend.py:116: UnboundLocalError _________________________ CurioAsyncTests.testResolve __________________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testResolve(self): async def run(): answer = await dns.asyncresolver.resolve("dns.google.", "A") return set([rdata.address for rdata in answer]) > seen = self.async_run(run) tests/test_async.py:178: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:175: in run answer = await dns.asyncresolver.resolve("dns.google.", "A") dns/asyncresolver.py:223: in resolve return await get_default_resolver().resolve( dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
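Note on the two UnboundLocalError failures above (testQueryTLS and testQueryTLSWithSocket): they come from the SOCK_STREAM branch of make_socket in dns/_curio_backend.py. curio.open_connection is awaited inside the `async with _maybe_timeout(timeout):` block, and when the connection attempt is cut short (there is no outbound network in this build root) the timeout context swallows the cancellation, so control reaches `return StreamSocket(s)` with `s` never having been bound. A minimal, self-contained sketch of the same pattern, written with asyncio instead of curio and using only the standard library:

```python
import asyncio
import contextlib

async def make_stream(timeout: float):
    # Stand-in for the pattern shown in dns/_curio_backend.py: the timeout
    # context suppresses the timeout instead of letting it propagate, so
    # execution continues past the block with `s` never assigned.
    with contextlib.suppress(asyncio.TimeoutError):
        s = await asyncio.wait_for(asyncio.sleep(3600), timeout)
    return s  # UnboundLocalError when the wait above timed out

async def main():
    try:
        await make_stream(0.01)
    except UnboundLocalError as exc:
        print("reproduced:", exc)

asyncio.run(main())
```

Returning from inside the timeout block, or checking that the variable was bound before using it, lets the underlying timeout surface instead of this secondary UnboundLocalError; that is the general remedy for the pattern, not necessarily the change applied upstream.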
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ______________________ CurioAsyncTests.testResolveAddress ______________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testResolveAddress(self): async def run(): return await dns.asyncresolver.resolve_address("8.8.8.8") > answer = self.async_run(run) tests/test_async.py:186: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:184: in run return await dns.asyncresolver.resolve_address("8.8.8.8") dns/asyncresolver.py:246: in resolve_address return await get_default_resolver().resolve_address(ipaddr, *args, **kwargs) dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _______________________ CurioAsyncTests.testZoneForName1 _______________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. 
If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName1(self): async def run(): name = dns.name.from_text("www.dnspython.org.") return await dns.asyncresolver.zone_for_name(name) ezname = dns.name.from_text("dnspython.org.") > zname = self.async_run(run) tests/test_async.py:223: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:220: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _______________________ CurioAsyncTests.testZoneForName2 _______________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName2(self): async def run(): name = dns.name.from_text("a.b.www.dnspython.org.") return await dns.asyncresolver.zone_for_name(name) ezname = dns.name.from_text("dnspython.org.") > zname = self.async_run(run) tests/test_async.py:232: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:229: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _______________________ CurioAsyncTests.testZoneForName3 _______________________ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName3(self): async def run(): name = dns.name.from_text("dnspython.org.") return await dns.asyncresolver.zone_for_name(name) ezname = dns.name.from_text("dnspython.org.") > zname = self.async_run(run) tests/test_async.py:241: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:238: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _______________________ CurioAsyncTests.testZoneForName4 _______________________ def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testZoneForName4(self): def bad(): name = dns.name.from_text("dnspython.org", None) async def run(): return await dns.asyncresolver.zone_for_name(name) self.async_run(run) > self.assertRaises(dns.resolver.NotAbsolute, bad) tests/test_async.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test_async.py:251: in bad self.async_run(run) tests/test_async.py:629: in async_run return kernel.run(afunc, shutdown=True) /usr/lib/python3.11/site-packages/curio/kernel.py:172: in run raise ret_exc /usr/lib/python3.11/site-packages/curio/kernel.py:738: in kernel_run trap = current.send(current._trap_result) /usr/lib/python3.11/site-packages/curio/task.py:167: in send return self._send(value) /usr/lib/python3.11/site-packages/curio/task.py:171: in _task_runner return await coro tests/test_async.py:249: in run return await dns.asyncresolver.zone_for_name(name) dns/asyncresolver.py:275: in zone_for_name resolver = get_default_resolver() dns/asyncresolver.py:186: in get_default_resolver reset_default_resolver() dns/asyncresolver.py:199: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _____________ DNSOverHTTPSTestCaseRequests.test_build_url_from_ip ______________ self = def _new_conn(self): """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: > conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw ) /usr/lib/python3.11/site-packages/urllib3/connection.py:174: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('1.1.1.1', 443), timeout = 4, source_address = None socket_options = [(6, 1, 1)] def create_connection( address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None, ): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. 
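The CurioAsyncTests failures above that end in NoResolverConfiguration all share one cause: dns.asyncresolver builds a default Resolver(), whose constructor calls read_resolv_conf('/etc/resolv.conf'), and that file does not exist inside the chroot. For reference only (not part of this build), a resolver can be constructed without consulting the system file by passing configure=False; the nameserver below is a placeholder, and the query itself would still need the outbound networking this environment also lacks.

```python
import dns.resolver

# Sketch only: configure=False skips read_resolv_conf() entirely, so a
# missing /etc/resolv.conf cannot raise NoResolverConfiguration here.
resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = ["192.0.2.53"]  # placeholder address (TEST-NET-1)
resolver.timeout = 2.0
resolver.lifetime = 4.0

# Issuing the query would still require outbound UDP/TCP, which the
# network-isolated build root does not provide.
# answer = resolver.resolve("dns.google.", "A")
```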
Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`socket.getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. An host of '' or port 0 tells the OS to use the default. """ host, port = address if host.startswith("["): host = host.strip("[]") err = None # Using the value from allowed_gai_family() in the context of getaddrinfo lets # us select whether to work with IPv4 DNS records, IPv6 records, or both. # The original create_connection function always returns all records. family = allowed_gai_family() try: host.encode("idna") except UnicodeError: return six.raise_from( LocationParseError(u"'%s', label empty or too long" % host), None ) for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) # If provided, set socket level options before connecting. _set_socket_options(sock, socket_options) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) return sock except socket.error as e: err = e if sock is not None: sock.close() sock = None if err is not None: > raise err /usr/lib/python3.11/site-packages/urllib3/util/connection.py:95: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('1.1.1.1', 443), timeout = 4, source_address = None socket_options = [(6, 1, 1)] def create_connection( address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None, ): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`socket.getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. An host of '' or port 0 tells the OS to use the default. """ host, port = address if host.startswith("["): host = host.strip("[]") err = None # Using the value from allowed_gai_family() in the context of getaddrinfo lets # us select whether to work with IPv4 DNS records, IPv6 records, or both. # The original create_connection function always returns all records. family = allowed_gai_family() try: host.encode("idna") except UnicodeError: return six.raise_from( LocationParseError(u"'%s', label empty or too long" % host), None ) for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) # If provided, set socket level options before connecting. 
_set_socket_options(sock, socket_options) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) > sock.connect(sa) E TimeoutError: timed out /usr/lib/python3.11/site-packages/urllib3/util/connection.py:85: TimeoutError During handling of the above exception, another exception occurred: self = method = 'GET', url = '/dns-query?dns=u08BAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE' body = None headers = {'User-Agent': 'python-requests/2.28.2', 'Accept-Encoding': 'gzip, deflate', 'accept': 'application/dns-message', 'Connection': 'keep-alive'} retries = Retry(total=0, connect=None, read=False, redirect=None, status=None) redirect = False, assert_same_host = False timeout = Timeout(connect=4, read=4, total=None), pool_timeout = None release_conn = False, chunked = False, body_pos = None response_kw = {'decode_content': False, 'preload_content': False} parsed_url = Url(scheme=None, auth=None, host=None, port=None, path='/dns-query', query='dns=u08BAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE', fragment=None) destination_scheme = None, conn = None, release_this_conn = True http_tunnel_required = False, err = None, clean_exit = False def urlopen( self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw ): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param url: The URL to perform the request on. :param body: Data to send in the request body, either :class:`str`, :class:`bytes`, an iterable of :class:`str`/:class:`bytes`, or a file-like object. :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When ``False``, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. 
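The GET URL in the frames above, /dns-query?dns=u08BAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE, is the RFC 8484 DNS-over-HTTPS GET form: the DNS query message is serialized to wire format and base64url-encoded with the padding stripped. A short sketch of how such a value is derived with dnspython (illustrative; the two DoH tests in this log produce URLs that differ only in their leading characters because each query carries a fresh random message ID):

```python
import base64

import dns.message
import dns.rdatatype

# Sketch: build the same kind of query the test sends and encode it the
# way RFC 8484 specifies for GET requests (base64url, padding removed).
q = dns.message.make_query("example.com.", dns.rdatatype.A)
wire = q.to_wire()
param = base64.urlsafe_b64encode(wire).rstrip(b"=").decode("ascii")
print(f"/dns-query?dns={param}")
```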
It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param chunked: If True, urllib3 will send the body using chunked transfer encoding. Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ parsed_url = parse_url(url) destination_scheme = parsed_url.scheme if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) # Ensure that the URL we're connecting to is properly encoded if url.startswith("/"): url = six.ensure_str(_encode_target(url)) else: url = six.ensure_str(parsed_url.url) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. # # [1] release_this_conn = release_conn http_tunnel_required = connection_requires_http_tunnel( self.proxy, self.proxy_config, destination_scheme ) # Merge the proxy headers. Only done when not using HTTP CONNECT. We # have to copy the headers dict so we can safely change it without those # changes being reflected in anyone else's copy. if not http_tunnel_required: headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None # Keep track of whether we cleanly exited the except block. This # ensures we do proper cleanup in finally. clean_exit = False # Rewind body position, if needed. Record current position # for future rewinds in the event of a redirect/retry. body_pos = set_file_position(body, body_pos) try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr( conn, "sock", None ) if is_new_proxy_conn and http_tunnel_required: self._prepare_proxy(conn) # Make the request on the httplib connection object. 
> httplib_response = self._make_request( conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked, ) /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:703: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = conn = method = 'GET', url = '/dns-query?dns=u08BAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE' timeout = Timeout(connect=4, read=4, total=None), chunked = False httplib_request_kw = {'body': None, 'headers': {'User-Agent': 'python-requests/2.28.2', 'Accept-Encoding': 'gzip, deflate', 'accept': 'application/dns-message', 'Connection': 'keep-alive'}} timeout_obj = Timeout(connect=4, read=4, total=None) def _make_request( self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw ): """ Perform a request on a given urllib connection object taken from our pool. :param conn: a connection from one of our connection pools :param timeout: Socket timeout in seconds for the request. This can be a float or integer, which will set the same timeout value for the socket connect and the socket read, or an instance of :class:`urllib3.util.Timeout`, which gives you more fine-grained control over your timeouts. """ self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = timeout_obj.connect_timeout # Trigger any extra validation we need to do. try: > self._validate_conn(conn) /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:386: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = conn = def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. if not getattr(conn, "sock", None): # AppEngine might not have `.sock` > conn.connect() /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:1042: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = def connect(self): # Add certificate verification > self.sock = conn = self._new_conn() /usr/lib/python3.11/site-packages/urllib3/connection.py:358: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = def _new_conn(self): """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw ) except SocketTimeout: > raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout), ) E urllib3.exceptions.ConnectTimeoutError: (, 'Connection to 1.1.1.1 timed out. (connect timeout=4)') /usr/lib/python3.11/site-packages/urllib3/connection.py:179: ConnectTimeoutError During handling of the above exception, another exception occurred: self = request = , stream = False timeout = Timeout(connect=4, read=4, total=None), verify = True, cert = None proxies = OrderedDict() def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. 
:param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ try: conn = self.get_connection(request.url, proxies) except LocationValueError as e: raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers( request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, ) chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError: raise ValueError( f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " f"or a single float to set both timeouts to the same value." ) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: > resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, ) /usr/lib/python3.11/site-packages/requests/adapters.py:489: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = method = 'GET', url = '/dns-query?dns=u08BAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE' body = None headers = {'User-Agent': 'python-requests/2.28.2', 'Accept-Encoding': 'gzip, deflate', 'accept': 'application/dns-message', 'Connection': 'keep-alive'} retries = Retry(total=0, connect=None, read=False, redirect=None, status=None) redirect = False, assert_same_host = False timeout = Timeout(connect=4, read=4, total=None), pool_timeout = None release_conn = False, chunked = False, body_pos = None response_kw = {'decode_content': False, 'preload_content': False} parsed_url = Url(scheme=None, auth=None, host=None, port=None, path='/dns-query', query='dns=u08BAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE', fragment=None) destination_scheme = None, conn = None, release_this_conn = True http_tunnel_required = False, err = None, clean_exit = False def urlopen( self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw ): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param url: The URL to perform the request on. 
:param body: Data to send in the request body, either :class:`str`, :class:`bytes`, an iterable of :class:`str`/:class:`bytes`, or a file-like object. :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When ``False``, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param chunked: If True, urllib3 will send the body using chunked transfer encoding. Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ parsed_url = parse_url(url) destination_scheme = parsed_url.scheme if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) # Ensure that the URL we're connecting to is properly encoded if url.startswith("/"): url = six.ensure_str(_encode_target(url)) else: url = six.ensure_str(parsed_url.url) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. 
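These urlopen frames all carry retries = Retry(total=0, connect=None, read=False, redirect=None, status=None), which is how requests expresses "never retry": the first connection error pushes total below zero, increment() finds the policy exhausted, and the MaxRetryError seen at the end of this traceback is raised. An offline sketch of that bookkeeping, assuming the urllib3 1.26 API quoted in these frames (the pool argument is simply left as None here):

```python
from urllib3.exceptions import ConnectTimeoutError, MaxRetryError
from urllib3.util.retry import Retry

# Sketch: a zero-retry policy is exhausted by its first connection error.
retries = Retry(total=0, connect=None, read=False, redirect=None, status=None)
error = ConnectTimeoutError(None, "Connection to 1.1.1.1 timed out. (connect timeout=4)")
try:
    retries = retries.increment(method="GET", url="/dns-query", error=error)
except MaxRetryError as exc:
    print("exhausted after one failure:", exc.reason)
```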
That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. # # [1] release_this_conn = release_conn http_tunnel_required = connection_requires_http_tunnel( self.proxy, self.proxy_config, destination_scheme ) # Merge the proxy headers. Only done when not using HTTP CONNECT. We # have to copy the headers dict so we can safely change it without those # changes being reflected in anyone else's copy. if not http_tunnel_required: headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None # Keep track of whether we cleanly exited the except block. This # ensures we do proper cleanup in finally. clean_exit = False # Rewind body position, if needed. Record current position # for future rewinds in the event of a redirect/retry. body_pos = set_file_position(body, body_pos) try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr( conn, "sock", None ) if is_new_proxy_conn and http_tunnel_required: self._prepare_proxy(conn) # Make the request on the httplib connection object. httplib_response = self._make_request( conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked, ) # If we're going to release the connection in ``finally:``, then # the response doesn't need to know about the connection. Otherwise # it will also try to release it and we'll have a double-release # mess. response_conn = conn if not release_conn else None # Pass method to Response for length checking response_kw["request_method"] = method # Import httplib's response into our own wrapper object response = self.ResponseCls.from_httplib( httplib_response, pool=self, connection=response_conn, retries=retries, **response_kw ) # Everything went great! clean_exit = True except EmptyPoolError: # Didn't get a connection from the pool, no need to clean up clean_exit = True release_this_conn = False raise except ( TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError, CertificateError, ) as e: # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False def _is_ssl_error_message_from_http_proxy(ssl_error): # We're trying to detect the message 'WRONG_VERSION_NUMBER' but # SSLErrors are kinda all over the place when it comes to the message, # so we try to cover our bases here! message = " ".join(re.split("[^a-z]", str(ssl_error).lower())) return ( "wrong version number" in message or "unknown protocol" in message ) # Try to detect a common user error with proxies which is to # set an HTTP proxy to be HTTPS when it should be 'http://' # (ie {'http': 'http://proxy', 'https': 'https://proxy'}) # Instead we add a nice error message and point to a URL. if ( isinstance(e, BaseSSLError) and self.proxy and _is_ssl_error_message_from_http_proxy(e) and conn.proxy and conn.proxy.scheme == "https" ): e = ProxyError( "Your proxy appears to only use HTTP and not HTTPS, " "try changing your proxy URL to be HTTP. 
See: " "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#https-proxy-error-http-proxy", SSLError(e), ) elif isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError("Cannot connect to proxy.", e) elif isinstance(e, (SocketError, HTTPException)): e = ProtocolError("Connection aborted.", e) > retries = retries.increment( method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] ) /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:787: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = Retry(total=0, connect=None, read=False, redirect=None, status=None) method = 'GET', url = '/dns-query?dns=u08BAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE' response = None error = ConnectTimeoutError(, 'Connection to 1.1.1.1 timed out. (connect timeout=4)') _pool = _stacktrace = def increment( self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None, ): """Return a new Retry object with incremented retry counters. :param response: A response object, or None, if the server did not return a response. :type response: :class:`~urllib3.response.HTTPResponse` :param Exception error: An error encountered during the request, or None if the response was received successfully. :return: A new ``Retry`` object. """ if self.total is False and error: # Disabled, indicate to re-raise the error. raise six.reraise(type(error), error, _stacktrace) total = self.total if total is not None: total -= 1 connect = self.connect read = self.read redirect = self.redirect status_count = self.status other = self.other cause = "unknown" status = None redirect_location = None if error and self._is_connection_error(error): # Connect retry? if connect is False: raise six.reraise(type(error), error, _stacktrace) elif connect is not None: connect -= 1 elif error and self._is_read_error(error): # Read retry? if read is False or not self._is_method_retryable(method): raise six.reraise(type(error), error, _stacktrace) elif read is not None: read -= 1 elif error: # Other retry? if other is not None: other -= 1 elif response and response.get_redirect_location(): # Redirect retry? if redirect is not None: redirect -= 1 cause = "too many redirects" redirect_location = response.get_redirect_location() status = response.status else: # Incrementing because of a server error like a 500 in # status_forcelist and the given method is in the allowed_methods cause = ResponseError.GENERIC_ERROR if response and response.status: if status_count is not None: status_count -= 1 cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status) status = response.status history = self.history + ( RequestHistory(method, url, error, status, redirect_location), ) new_retry = self.new( total=total, connect=connect, read=read, redirect=redirect, status=status_count, other=other, history=history, ) if new_retry.is_exhausted(): > raise MaxRetryError(_pool, url, error or ResponseError(cause)) E urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='1.1.1.1', port=443): Max retries exceeded with url: /dns-query?dns=u08BAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE (Caused by ConnectTimeoutError(, 'Connection to 1.1.1.1 timed out. 
(connect timeout=4)')) /usr/lib/python3.11/site-packages/urllib3/util/retry.py:592: MaxRetryError During handling of the above exception, another exception occurred: self = def test_build_url_from_ip(self): self.assertTrue(resolver_v4_addresses or resolver_v6_addresses) if resolver_v4_addresses: nameserver_ip = random.choice(resolver_v4_addresses) q = dns.message.make_query("example.com.", dns.rdatatype.A) # For some reason Google's DNS over HTTPS fails when you POST to # https://8.8.8.8/dns-query # So we're just going to do GET requests here > r = dns.query.https( q, nameserver_ip, session=self.session, post=False, timeout=4 ) tests/test_doh.py:105: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:427: in https response = session.get( /usr/lib/python3.11/site-packages/requests/sessions.py:600: in get return self.request("GET", url, **kwargs) /usr/lib/python3.11/site-packages/requests/sessions.py:587: in request resp = self.send(prep, **send_kwargs) /usr/lib/python3.11/site-packages/requests/sessions.py:701: in send r = adapter.send(request, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = , stream = False timeout = Timeout(connect=4, read=4, total=None), verify = True, cert = None proxies = OrderedDict() def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ try: conn = self.get_connection(request.url, proxies) except LocationValueError as e: raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers( request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, ) chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError: raise ValueError( f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " f"or a single float to set both timeouts to the same value." ) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, ) # Send the request. 
else: if hasattr(conn, "proxy_pool"): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) try: skip_host = "Host" in request.headers low_conn.putrequest( request.method, url, skip_accept_encoding=True, skip_host=skip_host, ) for header, value in request.headers.items(): low_conn.putheader(header, value) low_conn.endheaders() for i in request.body: low_conn.send(hex(len(i))[2:].encode("utf-8")) low_conn.send(b"\r\n") low_conn.send(i) low_conn.send(b"\r\n") low_conn.send(b"0\r\n\r\n") # Receive the response from the server r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, decode_content=False, ) except Exception: # If we hit any problems here, clean up the connection. # Then, raise so that we can handle the actual exception. low_conn.close() raise except (ProtocolError, OSError) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): # TODO: Remove this in 3.0.0: see #2811 if not isinstance(e.reason, NewConnectionError): > raise ConnectTimeout(e, request=request) E requests.exceptions.ConnectTimeout: HTTPSConnectionPool(host='1.1.1.1', port=443): Max retries exceeded with url: /dns-query?dns=u08BAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE (Caused by ConnectTimeoutError(, 'Connection to 1.1.1.1 timed out. (connect timeout=4)')) /usr/lib/python3.11/site-packages/requests/adapters.py:553: ConnectTimeout ________________ DNSOverHTTPSTestCaseRequests.test_get_request _________________ self = def _new_conn(self): """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: > conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw ) /usr/lib/python3.11/site-packages/urllib3/connection.py:174: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('dns.google', 443), timeout = 4, source_address = None socket_options = [(6, 1, 1)] def create_connection( address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None, ): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`socket.getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. An host of '' or port 0 tells the OS to use the default. """ host, port = address if host.startswith("["): host = host.strip("[]") err = None # Using the value from allowed_gai_family() in the context of getaddrinfo lets # us select whether to work with IPv4 DNS records, IPv6 records, or both. # The original create_connection function always returns all records. 
family = allowed_gai_family() try: host.encode("idna") except UnicodeError: return six.raise_from( LocationParseError(u"'%s', label empty or too long" % host), None ) > for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): /usr/lib/python3.11/site-packages/urllib3/util/connection.py:72: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'dns.google', port = 443, family = type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: self = method = 'GET', url = '/dns-query?dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE' body = None headers = {'User-Agent': 'python-requests/2.28.2', 'Accept-Encoding': 'gzip, deflate', 'accept': 'application/dns-message', 'Connection': 'keep-alive'} retries = Retry(total=0, connect=None, read=False, redirect=None, status=None) redirect = False, assert_same_host = False timeout = Timeout(connect=4, read=4, total=None), pool_timeout = None release_conn = False, chunked = False, body_pos = None response_kw = {'decode_content': False, 'preload_content': False} parsed_url = Url(scheme=None, auth=None, host=None, port=None, path='/dns-query', query='dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE', fragment=None) destination_scheme = None, conn = None, release_this_conn = True http_tunnel_required = False, err = None, clean_exit = False def urlopen( self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw ): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param url: The URL to perform the request on. :param body: Data to send in the request body, either :class:`str`, :class:`bytes`, an iterable of :class:`str`/:class:`bytes`, or a file-like object. :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. 
If provided, these headers completely replace any pool-specific headers. :param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When ``False``, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param chunked: If True, urllib3 will send the body using chunked transfer encoding. Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ parsed_url = parse_url(url) destination_scheme = parsed_url.scheme if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) # Ensure that the URL we're connecting to is properly encoded if url.startswith("/"): url = six.ensure_str(_encode_target(url)) else: url = six.ensure_str(parsed_url.url) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. 
# # [1] release_this_conn = release_conn http_tunnel_required = connection_requires_http_tunnel( self.proxy, self.proxy_config, destination_scheme ) # Merge the proxy headers. Only done when not using HTTP CONNECT. We # have to copy the headers dict so we can safely change it without those # changes being reflected in anyone else's copy. if not http_tunnel_required: headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None # Keep track of whether we cleanly exited the except block. This # ensures we do proper cleanup in finally. clean_exit = False # Rewind body position, if needed. Record current position # for future rewinds in the event of a redirect/retry. body_pos = set_file_position(body, body_pos) try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr( conn, "sock", None ) if is_new_proxy_conn and http_tunnel_required: self._prepare_proxy(conn) # Make the request on the httplib connection object. > httplib_response = self._make_request( conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked, ) /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:703: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = conn = method = 'GET', url = '/dns-query?dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE' timeout = Timeout(connect=4, read=4, total=None), chunked = False httplib_request_kw = {'body': None, 'headers': {'User-Agent': 'python-requests/2.28.2', 'Accept-Encoding': 'gzip, deflate', 'accept': 'application/dns-message', 'Connection': 'keep-alive'}} timeout_obj = Timeout(connect=4, read=4, total=None) def _make_request( self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw ): """ Perform a request on a given urllib connection object taken from our pool. :param conn: a connection from one of our connection pools :param timeout: Socket timeout in seconds for the request. This can be a float or integer, which will set the same timeout value for the socket connect and the socket read, or an instance of :class:`urllib3.util.Timeout`, which gives you more fine-grained control over your timeouts. """ self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = timeout_obj.connect_timeout # Trigger any extra validation we need to do. try: > self._validate_conn(conn) /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:386: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = conn = def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. 
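The parsed_url value shown in the locals above comes from urllib3's own URL parser; because the request target is a relative path, only the path and query fields are populated. A quick sketch against the urllib3 1.26 API used in this traceback:

    from urllib3.util import parse_url

    u = parse_url("/dns-query?dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE")
    print(u.scheme, u.host, u.path, u.query)
    # None None /dns-query dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE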
if not getattr(conn, "sock", None): # AppEngine might not have `.sock` > conn.connect() /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:1042: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = def connect(self): # Add certificate verification > self.sock = conn = self._new_conn() /usr/lib/python3.11/site-packages/urllib3/connection.py:358: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = def _new_conn(self): """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw ) except SocketTimeout: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout), ) except SocketError as e: > raise NewConnectionError( self, "Failed to establish a new connection: %s" % e ) E urllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/urllib3/connection.py:186: NewConnectionError During handling of the above exception, another exception occurred: self = request = , stream = False timeout = Timeout(connect=4, read=4, total=None), verify = True, cert = None proxies = OrderedDict() def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ try: conn = self.get_connection(request.url, proxies) except LocationValueError as e: raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers( request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, ) chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError: raise ValueError( f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " f"or a single float to set both timeouts to the same value." 
) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: > resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, ) /usr/lib/python3.11/site-packages/requests/adapters.py:489: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = method = 'GET', url = '/dns-query?dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE' body = None headers = {'User-Agent': 'python-requests/2.28.2', 'Accept-Encoding': 'gzip, deflate', 'accept': 'application/dns-message', 'Connection': 'keep-alive'} retries = Retry(total=0, connect=None, read=False, redirect=None, status=None) redirect = False, assert_same_host = False timeout = Timeout(connect=4, read=4, total=None), pool_timeout = None release_conn = False, chunked = False, body_pos = None response_kw = {'decode_content': False, 'preload_content': False} parsed_url = Url(scheme=None, auth=None, host=None, port=None, path='/dns-query', query='dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE', fragment=None) destination_scheme = None, conn = None, release_this_conn = True http_tunnel_required = False, err = None, clean_exit = False def urlopen( self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw ): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param url: The URL to perform the request on. :param body: Data to send in the request body, either :class:`str`, :class:`bytes`, an iterable of :class:`str`/:class:`bytes`, or a file-like object. :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. 
When ``False``, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param chunked: If True, urllib3 will send the body using chunked transfer encoding. Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ parsed_url = parse_url(url) destination_scheme = parsed_url.scheme if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) # Ensure that the URL we're connecting to is properly encoded if url.startswith("/"): url = six.ensure_str(_encode_target(url)) else: url = six.ensure_str(parsed_url.url) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. # # [1] release_this_conn = release_conn http_tunnel_required = connection_requires_http_tunnel( self.proxy, self.proxy_config, destination_scheme ) # Merge the proxy headers. Only done when not using HTTP CONNECT. We # have to copy the headers dict so we can safely change it without those # changes being reflected in anyone else's copy. if not http_tunnel_required: headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None # Keep track of whether we cleanly exited the except block. This # ensures we do proper cleanup in finally. clean_exit = False # Rewind body position, if needed. Record current position # for future rewinds in the event of a redirect/retry. body_pos = set_file_position(body, body_pos) try: # Request a connection from the queue. 
timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr( conn, "sock", None ) if is_new_proxy_conn and http_tunnel_required: self._prepare_proxy(conn) # Make the request on the httplib connection object. httplib_response = self._make_request( conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked, ) # If we're going to release the connection in ``finally:``, then # the response doesn't need to know about the connection. Otherwise # it will also try to release it and we'll have a double-release # mess. response_conn = conn if not release_conn else None # Pass method to Response for length checking response_kw["request_method"] = method # Import httplib's response into our own wrapper object response = self.ResponseCls.from_httplib( httplib_response, pool=self, connection=response_conn, retries=retries, **response_kw ) # Everything went great! clean_exit = True except EmptyPoolError: # Didn't get a connection from the pool, no need to clean up clean_exit = True release_this_conn = False raise except ( TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError, CertificateError, ) as e: # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False def _is_ssl_error_message_from_http_proxy(ssl_error): # We're trying to detect the message 'WRONG_VERSION_NUMBER' but # SSLErrors are kinda all over the place when it comes to the message, # so we try to cover our bases here! message = " ".join(re.split("[^a-z]", str(ssl_error).lower())) return ( "wrong version number" in message or "unknown protocol" in message ) # Try to detect a common user error with proxies which is to # set an HTTP proxy to be HTTPS when it should be 'http://' # (ie {'http': 'http://proxy', 'https': 'https://proxy'}) # Instead we add a nice error message and point to a URL. if ( isinstance(e, BaseSSLError) and self.proxy and _is_ssl_error_message_from_http_proxy(e) and conn.proxy and conn.proxy.scheme == "https" ): e = ProxyError( "Your proxy appears to only use HTTP and not HTTPS, " "try changing your proxy URL to be HTTP. See: " "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#https-proxy-error-http-proxy", SSLError(e), ) elif isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError("Cannot connect to proxy.", e) elif isinstance(e, (SocketError, HTTPException)): e = ProtocolError("Connection aborted.", e) > retries = retries.increment( method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] ) /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:787: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = Retry(total=0, connect=None, read=False, redirect=None, status=None) method = 'GET', url = '/dns-query?dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE' response = None error = NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution') _pool = _stacktrace = def increment( self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None, ): """Return a new Retry object with incremented retry counters. :param response: A response object, or None, if the server did not return a response. 
:type response: :class:`~urllib3.response.HTTPResponse` :param Exception error: An error encountered during the request, or None if the response was received successfully. :return: A new ``Retry`` object. """ if self.total is False and error: # Disabled, indicate to re-raise the error. raise six.reraise(type(error), error, _stacktrace) total = self.total if total is not None: total -= 1 connect = self.connect read = self.read redirect = self.redirect status_count = self.status other = self.other cause = "unknown" status = None redirect_location = None if error and self._is_connection_error(error): # Connect retry? if connect is False: raise six.reraise(type(error), error, _stacktrace) elif connect is not None: connect -= 1 elif error and self._is_read_error(error): # Read retry? if read is False or not self._is_method_retryable(method): raise six.reraise(type(error), error, _stacktrace) elif read is not None: read -= 1 elif error: # Other retry? if other is not None: other -= 1 elif response and response.get_redirect_location(): # Redirect retry? if redirect is not None: redirect -= 1 cause = "too many redirects" redirect_location = response.get_redirect_location() status = response.status else: # Incrementing because of a server error like a 500 in # status_forcelist and the given method is in the allowed_methods cause = ResponseError.GENERIC_ERROR if response and response.status: if status_count is not None: status_count -= 1 cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status) status = response.status history = self.history + ( RequestHistory(method, url, error, status, redirect_location), ) new_retry = self.new( total=total, connect=connect, read=read, redirect=redirect, status=status_count, other=other, history=history, ) if new_retry.is_exhausted(): > raise MaxRetryError(_pool, url, error or ResponseError(cause)) E urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='dns.google', port=443): Max retries exceeded with url: /dns-query?dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) /usr/lib/python3.11/site-packages/urllib3/util/retry.py:592: MaxRetryError During handling of the above exception, another exception occurred: self = def test_get_request(self): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) > r = dns.query.https( q, nameserver_url, session=self.session, post=False, timeout=4 ) tests/test_doh.py:84: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:427: in https response = session.get( /usr/lib/python3.11/site-packages/requests/sessions.py:600: in get return self.request("GET", url, **kwargs) /usr/lib/python3.11/site-packages/requests/sessions.py:587: in request resp = self.send(prep, **send_kwargs) /usr/lib/python3.11/site-packages/requests/sessions.py:701: in send r = adapter.send(request, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = , stream = False timeout = Timeout(connect=4, read=4, total=None), verify = True, cert = None proxies = OrderedDict() def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. 
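The Retry(total=0, connect=None, read=False, ...) object in the locals above is why a single resolution failure is fatal here: increment() produces an exhausted Retry and raises MaxRetryError at once. A minimal sketch against urllib3 1.26's public Retry API (the pool argument is left as None, so the error text differs slightly from the log):

    from urllib3.exceptions import MaxRetryError, NewConnectionError
    from urllib3.util.retry import Retry

    retries = Retry(total=0, connect=None, read=False, redirect=None, status=None)
    err = NewConnectionError(None, "Temporary failure in name resolution")
    try:
        # total goes 0 -> -1, the new Retry object is exhausted, so
        # MaxRetryError is raised with the original error as its reason.
        retries.increment(method="GET", url="/dns-query", error=err)
    except MaxRetryError as exc:
        print(type(exc).__name__, exc.reason)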
:param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ try: conn = self.get_connection(request.url, proxies) except LocationValueError as e: raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers( request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, ) chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError: raise ValueError( f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " f"or a single float to set both timeouts to the same value." ) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, ) # Send the request. else: if hasattr(conn, "proxy_pool"): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) try: skip_host = "Host" in request.headers low_conn.putrequest( request.method, url, skip_accept_encoding=True, skip_host=skip_host, ) for header, value in request.headers.items(): low_conn.putheader(header, value) low_conn.endheaders() for i in request.body: low_conn.send(hex(len(i))[2:].encode("utf-8")) low_conn.send(b"\r\n") low_conn.send(i) low_conn.send(b"\r\n") low_conn.send(b"0\r\n\r\n") # Receive the response from the server r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, decode_content=False, ) except Exception: # If we hit any problems here, clean up the connection. # Then, raise so that we can handle the actual exception. low_conn.close() raise except (ProtocolError, OSError) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): # TODO: Remove this in 3.0.0: see #2811 if not isinstance(e.reason, NewConnectionError): raise ConnectTimeout(e, request=request) if isinstance(e.reason, ResponseError): raise RetryError(e, request=request) if isinstance(e.reason, _ProxyError): raise ProxyError(e, request=request) if isinstance(e.reason, _SSLError): # This branch is for urllib3 v1.22 and later. 
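As the next lines show, the requests adapter turns that MaxRetryError into requests.exceptions.ConnectionError, which is what finally reaches test_get_request. A sketch of the same call with the failure handled, using the dns.query.https signature visible in tests/test_doh.py above (resolver URL taken from this log):

    import requests
    import dns.message
    import dns.query
    import dns.rdatatype

    q = dns.message.make_query("example.com.", dns.rdatatype.A)
    with requests.Session() as session:
        try:
            r = dns.query.https(q, "https://dns.google/dns-query",
                                session=session, post=False, timeout=4)
        except requests.exceptions.ConnectionError as exc:
            # Expected outcome in a build root without name resolution.
            print("resolution failed:", exc)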
raise SSLError(e, request=request) > raise ConnectionError(e, request=request) E requests.exceptions.ConnectionError: HTTPSConnectionPool(host='dns.google', port=443): Max retries exceeded with url: /dns-query?dns=nCQBAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) /usr/lib/python3.11/site-packages/requests/adapters.py:565: ConnectionError ________________ DNSOverHTTPSTestCaseRequests.test_new_session _________________ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} with map_exceptions(exc_map): > sock = socket.create_connection( address, timeout, source_address=source_address ) /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('cloudflare-dns.com', 443), timeout = 4, source_address = None def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, all_errors=False): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. When a connection cannot be created, raises the last error if *all_errors* is False, and an ExceptionGroup of all errors if *all_errors* is True. """ host, port = address exceptions = [] > for res in getaddrinfo(host, port, 0, SOCK_STREAM): /usr/lib64/python3.11/socket.py:827: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'cloudflare-dns.com', port = 443, family = 0 type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. 
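Every one of these failures bottoms out in the same call: the build chroot has no working resolver, so socket.getaddrinfo() fails with EAI_AGAIN. A minimal reproduction of the error seen throughout this log:

    import socket

    try:
        socket.getaddrinfo("cloudflare-dns.com", 443, 0, socket.SOCK_STREAM)
    except socket.gaierror as exc:
        # In this environment: -3 'Temporary failure in name resolution'.
        print(exc.errno, exc.strerror)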
addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request( self, request: Request, ) -> Response: assert isinstance(request.stream, SyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = self._pool.handle_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:218: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: response = connection.handle_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() self._attempt_to_acquire_connection(status) except BaseException as exc: self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." 
) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: > response = connection.handle_request(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: stream = self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import HTTP2Connection self._connection = HTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = HTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: > stream = self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def _connect(self, request: Request) -> NetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": self._local_address, "timeout": timeout, } with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:85: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 
'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = def test_new_session(self): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) > r = dns.query.https(q, nameserver_url, timeout=4) tests/test_doh.py:138: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:412: in https response = session.post( /usr/lib/python3.11/site-packages/httpx/_client.py:1130: in post return self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:815: in request return self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:902: in send response = self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:930: in _send_handling_auth response = self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:967: in _send_handling_redirects response = self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1003: in _send_single_request response = transport.handle_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:217: in handle_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError ________________ DNSOverHTTPSTestCaseRequests.test_post_request ________________ self = def _new_conn(self): """Establish a socket connection and set nodelay settings on it. :return: New socket connection. 
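The test_new_session failure above goes through dnspython 2.3.0's default httpx-backed client: httpcore maps the gaierror to its ConnectError, httpx re-raises it as httpx.ConnectError, and that propagates out of dns.query.https. A sketch of the same call with the failure handled (resolver URL taken from this traceback):

    import httpx
    import dns.message
    import dns.query
    import dns.rdatatype

    q = dns.message.make_query("example.com.", dns.rdatatype.A)
    try:
        r = dns.query.https(q, "https://cloudflare-dns.com/dns-query", timeout=4)
    except httpx.ConnectError as exc:
        # Same root cause: no name resolution in the build environment.
        print("resolution failed:", exc)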
""" extra_kw = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: > conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw ) /usr/lib/python3.11/site-packages/urllib3/connection.py:174: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('cloudflare-dns.com', 443), timeout = 4, source_address = None socket_options = [(6, 1, 1)] def create_connection( address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None, ): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`socket.getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. An host of '' or port 0 tells the OS to use the default. """ host, port = address if host.startswith("["): host = host.strip("[]") err = None # Using the value from allowed_gai_family() in the context of getaddrinfo lets # us select whether to work with IPv4 DNS records, IPv6 records, or both. # The original create_connection function always returns all records. family = allowed_gai_family() try: host.encode("idna") except UnicodeError: return six.raise_from( LocationParseError(u"'%s', label empty or too long" % host), None ) > for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): /usr/lib/python3.11/site-packages/urllib3/util/connection.py:72: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'cloudflare-dns.com', port = 443, family = type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. 
addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: self = method = 'POST', url = '/dns-query' body = b'G\xc4\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x01\x00\x01' headers = {'User-Agent': 'python-requests/2.28.2', 'Accept-Encoding': 'gzip, deflate', 'accept': 'application/dns-message', 'Connection': 'keep-alive', 'content-type': 'application/dns-message', 'Content-Length': '29'} retries = Retry(total=0, connect=None, read=False, redirect=None, status=None) redirect = False, assert_same_host = False timeout = Timeout(connect=4, read=4, total=None), pool_timeout = None release_conn = False, chunked = False, body_pos = None response_kw = {'decode_content': False, 'preload_content': False} parsed_url = Url(scheme=None, auth=None, host=None, port=None, path='/dns-query', query=None, fragment=None) destination_scheme = None, conn = None, release_this_conn = True http_tunnel_required = False, err = None, clean_exit = False def urlopen( self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw ): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param url: The URL to perform the request on. :param body: Data to send in the request body, either :class:`str`, :class:`bytes`, an iterable of :class:`str`/:class:`bytes`, or a file-like object. :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When ``False``, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. 
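test_post_request uses the RFC 8484 POST form instead: the 29-byte body in the locals above is simply the query in DNS wire format, sent with content-type application/dns-message. A quick check of that length with dnspython:

    import dns.message
    import dns.rdatatype

    q = dns.message.make_query("example.com.", dns.rdatatype.A)
    wire = q.to_wire()
    # 12-byte header + 13-byte QNAME for 'example.com.' + QTYPE + QCLASS = 29.
    print(len(wire), wire[-17:])   # 29 b'\x07example\x03com\x00\x00\x01\x00\x01'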
It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param chunked: If True, urllib3 will send the body using chunked transfer encoding. Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ parsed_url = parse_url(url) destination_scheme = parsed_url.scheme if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) # Ensure that the URL we're connecting to is properly encoded if url.startswith("/"): url = six.ensure_str(_encode_target(url)) else: url = six.ensure_str(parsed_url.url) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. # # [1] release_this_conn = release_conn http_tunnel_required = connection_requires_http_tunnel( self.proxy, self.proxy_config, destination_scheme ) # Merge the proxy headers. Only done when not using HTTP CONNECT. We # have to copy the headers dict so we can safely change it without those # changes being reflected in anyone else's copy. if not http_tunnel_required: headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None # Keep track of whether we cleanly exited the except block. This # ensures we do proper cleanup in finally. clean_exit = False # Rewind body position, if needed. Record current position # for future rewinds in the event of a redirect/retry. body_pos = set_file_position(body, body_pos) try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr( conn, "sock", None ) if is_new_proxy_conn and http_tunnel_required: self._prepare_proxy(conn) # Make the request on the httplib connection object. 
> httplib_response = self._make_request( conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked, ) /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:703: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = conn = method = 'POST', url = '/dns-query' timeout = Timeout(connect=4, read=4, total=None), chunked = False httplib_request_kw = {'body': b'G\xc4\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x01\x00\x01', 'headers': {'User-Age...plication/dns-message', 'Connection': 'keep-alive', 'content-type': 'application/dns-message', 'Content-Length': '29'}} timeout_obj = Timeout(connect=4, read=4, total=None) def _make_request( self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw ): """ Perform a request on a given urllib connection object taken from our pool. :param conn: a connection from one of our connection pools :param timeout: Socket timeout in seconds for the request. This can be a float or integer, which will set the same timeout value for the socket connect and the socket read, or an instance of :class:`urllib3.util.Timeout`, which gives you more fine-grained control over your timeouts. """ self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = timeout_obj.connect_timeout # Trigger any extra validation we need to do. try: > self._validate_conn(conn) /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:386: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = conn = def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. if not getattr(conn, "sock", None): # AppEngine might not have `.sock` > conn.connect() /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:1042: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = def connect(self): # Add certificate verification > self.sock = conn = self._new_conn() /usr/lib/python3.11/site-packages/urllib3/connection.py:358: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = def _new_conn(self): """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ extra_kw = {} if self.source_address: extra_kw["source_address"] = self.source_address if self.socket_options: extra_kw["socket_options"] = self.socket_options try: conn = connection.create_connection( (self._dns_host, self.port), self.timeout, **extra_kw ) except SocketTimeout: raise ConnectTimeoutError( self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout), ) except SocketError as e: > raise NewConnectionError( self, "Failed to establish a new connection: %s" % e ) E urllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/urllib3/connection.py:186: NewConnectionError During handling of the above exception, another exception occurred: self = request = , stream = False timeout = Timeout(connect=4, read=4, total=None), verify = True, cert = None proxies = OrderedDict() def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. 
:param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ try: conn = self.get_connection(request.url, proxies) except LocationValueError as e: raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers( request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, ) chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError: raise ValueError( f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " f"or a single float to set both timeouts to the same value." ) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: > resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, ) /usr/lib/python3.11/site-packages/requests/adapters.py:489: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = method = 'POST', url = '/dns-query' body = b'G\xc4\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x01\x00\x01' headers = {'User-Agent': 'python-requests/2.28.2', 'Accept-Encoding': 'gzip, deflate', 'accept': 'application/dns-message', 'Connection': 'keep-alive', 'content-type': 'application/dns-message', 'Content-Length': '29'} retries = Retry(total=0, connect=None, read=False, redirect=None, status=None) redirect = False, assert_same_host = False timeout = Timeout(connect=4, read=4, total=None), pool_timeout = None release_conn = False, chunked = False, body_pos = None response_kw = {'decode_content': False, 'preload_content': False} parsed_url = Url(scheme=None, auth=None, host=None, port=None, path='/dns-query', query=None, fragment=None) destination_scheme = None, conn = None, release_this_conn = True http_tunnel_required = False, err = None, clean_exit = False def urlopen( self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, chunked=False, body_pos=None, **response_kw ): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param url: The URL to perform the request on. 
:param body: Data to send in the request body, either :class:`str`, :class:`bytes`, an iterable of :class:`str`/:class:`bytes`, or a file-like object. :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When ``False``, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param chunked: If True, urllib3 will send the body using chunked transfer encoding. Otherwise, urllib3 will send the body using the standard content-length form. Defaults to False. :param int body_pos: Position to seek to in file-like body in the event of a retry or redirect. Typically this won't need to be set because urllib3 will auto-populate the value when needed. :param \\**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ parsed_url = parse_url(url) destination_scheme = parsed_url.scheme if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get("preload_content", True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) # Ensure that the URL we're connecting to is properly encoded if url.startswith("/"): url = six.ensure_str(_encode_target(url)) else: url = six.ensure_str(parsed_url.url) conn = None # Track whether `conn` needs to be released before # returning/raising/recursing. Update this variable if necessary, and # leave `release_conn` constant throughout the function. 
That way, if # the function recurses, the original value of `release_conn` will be # passed down into the recursive call, and its value will be respected. # # See issue #651 [1] for details. # # [1] release_this_conn = release_conn http_tunnel_required = connection_requires_http_tunnel( self.proxy, self.proxy_config, destination_scheme ) # Merge the proxy headers. Only done when not using HTTP CONNECT. We # have to copy the headers dict so we can safely change it without those # changes being reflected in anyone else's copy. if not http_tunnel_required: headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None # Keep track of whether we cleanly exited the except block. This # ensures we do proper cleanup in finally. clean_exit = False # Rewind body position, if needed. Record current position # for future rewinds in the event of a redirect/retry. body_pos = set_file_position(body, body_pos) try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr( conn, "sock", None ) if is_new_proxy_conn and http_tunnel_required: self._prepare_proxy(conn) # Make the request on the httplib connection object. httplib_response = self._make_request( conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked, ) # If we're going to release the connection in ``finally:``, then # the response doesn't need to know about the connection. Otherwise # it will also try to release it and we'll have a double-release # mess. response_conn = conn if not release_conn else None # Pass method to Response for length checking response_kw["request_method"] = method # Import httplib's response into our own wrapper object response = self.ResponseCls.from_httplib( httplib_response, pool=self, connection=response_conn, retries=retries, **response_kw ) # Everything went great! clean_exit = True except EmptyPoolError: # Didn't get a connection from the pool, no need to clean up clean_exit = True release_this_conn = False raise except ( TimeoutError, HTTPException, SocketError, ProtocolError, BaseSSLError, SSLError, CertificateError, ) as e: # Discard the connection for these exceptions. It will be # replaced during the next _get_conn() call. clean_exit = False def _is_ssl_error_message_from_http_proxy(ssl_error): # We're trying to detect the message 'WRONG_VERSION_NUMBER' but # SSLErrors are kinda all over the place when it comes to the message, # so we try to cover our bases here! message = " ".join(re.split("[^a-z]", str(ssl_error).lower())) return ( "wrong version number" in message or "unknown protocol" in message ) # Try to detect a common user error with proxies which is to # set an HTTP proxy to be HTTPS when it should be 'http://' # (ie {'http': 'http://proxy', 'https': 'https://proxy'}) # Instead we add a nice error message and point to a URL. if ( isinstance(e, BaseSSLError) and self.proxy and _is_ssl_error_message_from_http_proxy(e) and conn.proxy and conn.proxy.scheme == "https" ): e = ProxyError( "Your proxy appears to only use HTTP and not HTTPS, " "try changing your proxy URL to be HTTP. 
See: " "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" "#https-proxy-error-http-proxy", SSLError(e), ) elif isinstance(e, (BaseSSLError, CertificateError)): e = SSLError(e) elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError("Cannot connect to proxy.", e) elif isinstance(e, (SocketError, HTTPException)): e = ProtocolError("Connection aborted.", e) > retries = retries.increment( method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2] ) /usr/lib/python3.11/site-packages/urllib3/connectionpool.py:787: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = Retry(total=0, connect=None, read=False, redirect=None, status=None) method = 'POST', url = '/dns-query', response = None error = NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution') _pool = _stacktrace = def increment( self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None, ): """Return a new Retry object with incremented retry counters. :param response: A response object, or None, if the server did not return a response. :type response: :class:`~urllib3.response.HTTPResponse` :param Exception error: An error encountered during the request, or None if the response was received successfully. :return: A new ``Retry`` object. """ if self.total is False and error: # Disabled, indicate to re-raise the error. raise six.reraise(type(error), error, _stacktrace) total = self.total if total is not None: total -= 1 connect = self.connect read = self.read redirect = self.redirect status_count = self.status other = self.other cause = "unknown" status = None redirect_location = None if error and self._is_connection_error(error): # Connect retry? if connect is False: raise six.reraise(type(error), error, _stacktrace) elif connect is not None: connect -= 1 elif error and self._is_read_error(error): # Read retry? if read is False or not self._is_method_retryable(method): raise six.reraise(type(error), error, _stacktrace) elif read is not None: read -= 1 elif error: # Other retry? if other is not None: other -= 1 elif response and response.get_redirect_location(): # Redirect retry? 
if redirect is not None: redirect -= 1 cause = "too many redirects" redirect_location = response.get_redirect_location() status = response.status else: # Incrementing because of a server error like a 500 in # status_forcelist and the given method is in the allowed_methods cause = ResponseError.GENERIC_ERROR if response and response.status: if status_count is not None: status_count -= 1 cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status) status = response.status history = self.history + ( RequestHistory(method, url, error, status, redirect_location), ) new_retry = self.new( total=total, connect=connect, read=read, redirect=redirect, status=status_count, other=other, history=history, ) if new_retry.is_exhausted(): > raise MaxRetryError(_pool, url, error or ResponseError(cause)) E urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='cloudflare-dns.com', port=443): Max retries exceeded with url: /dns-query (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) /usr/lib/python3.11/site-packages/urllib3/util/retry.py:592: MaxRetryError During handling of the above exception, another exception occurred: self = def test_post_request(self): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) > r = dns.query.https( q, nameserver_url, session=self.session, post=True, timeout=4 ) tests/test_doh.py:92: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:416: in https response = session.post( /usr/lib/python3.11/site-packages/requests/sessions.py:635: in post return self.request("POST", url, data=data, json=json, **kwargs) /usr/lib/python3.11/site-packages/requests/sessions.py:587: in request resp = self.send(prep, **send_kwargs) /usr/lib/python3.11/site-packages/requests/sessions.py:701: in send r = adapter.send(request, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = , stream = False timeout = Timeout(connect=4, read=4, total=None), verify = True, cert = None proxies = OrderedDict() def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest ` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) ` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. 
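The `Retry.increment()` frame above explains why the failure surfaces immediately: requests' default adapter uses `Retry(total=0, ...)`, so the single `NewConnectionError` already exhausts the retry budget and is wrapped in `MaxRetryError`, which the adapter then re-raises as `requests.exceptions.ConnectionError` (see the frames that follow). A hedged sketch of the same mechanics; the session and endpoint are illustrative and assume the same network-less environment:

    import requests
    from requests.adapters import HTTPAdapter

    session = requests.Session()
    # max_retries=0 (the default) means the first connection error is final:
    # urllib3 raises MaxRetryError, requests converts it to ConnectionError.
    session.mount("https://", HTTPAdapter(max_retries=0))

    try:
        session.post("https://cloudflare-dns.com/dns-query",
                     data=b"\x00" * 12, timeout=4)
    except requests.exceptions.ConnectionError as exc:
        print("gave up without retrying:", exc)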
:rtype: requests.Response """ try: conn = self.get_connection(request.url, proxies) except LocationValueError as e: raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers( request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, ) chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError: raise ValueError( f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " f"or a single float to set both timeouts to the same value." ) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, ) # Send the request. else: if hasattr(conn, "proxy_pool"): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) try: skip_host = "Host" in request.headers low_conn.putrequest( request.method, url, skip_accept_encoding=True, skip_host=skip_host, ) for header, value in request.headers.items(): low_conn.putheader(header, value) low_conn.endheaders() for i in request.body: low_conn.send(hex(len(i))[2:].encode("utf-8")) low_conn.send(b"\r\n") low_conn.send(i) low_conn.send(b"\r\n") low_conn.send(b"0\r\n\r\n") # Receive the response from the server r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, decode_content=False, ) except Exception: # If we hit any problems here, clean up the connection. # Then, raise so that we can handle the actual exception. low_conn.close() raise except (ProtocolError, OSError) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): # TODO: Remove this in 3.0.0: see #2811 if not isinstance(e.reason, NewConnectionError): raise ConnectTimeout(e, request=request) if isinstance(e.reason, ResponseError): raise RetryError(e, request=request) if isinstance(e.reason, _ProxyError): raise ProxyError(e, request=request) if isinstance(e.reason, _SSLError): # This branch is for urllib3 v1.22 and later. 
raise SSLError(e, request=request) > raise ConnectionError(e, request=request) E requests.exceptions.ConnectionError: HTTPSConnectionPool(host='cloudflare-dns.com', port=443): Max retries exceeded with url: /dns-query (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) /usr/lib/python3.11/site-packages/requests/adapters.py:565: ConnectionError __________________ DNSOverHTTPSTestCaseRequests.test_resolver __________________ self = def test_resolver(self): res = dns.resolver.Resolver(configure=False) res.nameservers = ["https://dns.google/dns-query"] > answer = res.resolve("dns.google", "A") tests/test_doh.py:144: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1204: in resolve timeout = self._compute_timeout(start, lifetime, resolution.errors) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = start = 1681629482.5082703, lifetime = 5.0 errors = [('https://dns.google/dns-query', False, 53, ConnectError('[Errno -3] Temporary failure in name resolution'), None), (...e), ('https://dns.google/dns-query', False, 53, ConnectError('[Errno -3] Temporary failure in name resolution'), None)] def _compute_timeout( self, start: float, lifetime: Optional[float] = None, errors: Optional[List[ErrorTuple]] = None, ) -> float: lifetime = self.lifetime if lifetime is None else lifetime now = time.time() duration = now - start if errors is None: errors = [] if duration < 0: if duration < -1: # Time going backwards is bad. Just give up. raise LifetimeTimeout(timeout=duration, errors=errors) else: # Time went backwards, but only a little. This can # happen, e.g. under vmware with older linux kernels. # Pretend it didn't happen. 
duration = 0 if duration >= lifetime: > raise LifetimeTimeout(timeout=duration, errors=errors) E dns.resolver.LifetimeTimeout: The resolution lifetime expired after 5.834 seconds: Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution dns/resolver.py:988: LifetimeTimeout _______________ DNSOverHTTPSTestCaseHttpx.test_build_url_from_ip _______________ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = '1.1.1.1', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} with map_exceptions(exc_map): > sock = socket.create_connection( address, timeout, source_address=source_address ) /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('1.1.1.1', 443), timeout = 4, source_address = None def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, all_errors=False): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. When a connection cannot be created, raises the last error if *all_errors* is False, and an ExceptionGroup of all errors if *all_errors* is True. """ host, port = address exceptions = [] for res in getaddrinfo(host, port, 0, SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket(af, socktype, proto) if timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) # Break explicitly a reference cycle exceptions.clear() return sock except error as exc: if not all_errors: exceptions.clear() # raise only the last error exceptions.append(exc) if sock is not None: sock.close() if len(exceptions): try: if not all_errors: > raise exceptions[0] /usr/lib64/python3.11/socket.py:851: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('1.1.1.1', 443), timeout = 4, source_address = None def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, all_errors=False): """Connect to *address* and return the socket object. 
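The `LifetimeTimeout` above comes from the resolver's `_compute_timeout()`: each failed attempt against the DoH nameserver is recorded, and once the overall `lifetime` budget (5 seconds by default) is spent the resolver gives up with the accumulated error list. A sketch of the same configuration `test_resolver` uses, with the lifetime made explicit; it can only succeed where outbound DNS and HTTPS actually work:

    import dns.resolver

    res = dns.resolver.Resolver(configure=False)
    res.nameservers = ["https://dns.google/dns-query"]  # DNS-over-HTTPS nameserver

    try:
        # lifetime is the total budget across all attempts, not a per-try timeout
        answer = res.resolve("dns.google", "A", lifetime=5.0)
        print([rr.address for rr in answer])
    except dns.resolver.LifetimeTimeout as exc:
        # carries the per-attempt errors, like the list shown in the log above
        print("resolution gave up:", exc)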
Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. When a connection cannot be created, raises the last error if *all_errors* is False, and an ExceptionGroup of all errors if *all_errors* is True. """ host, port = address exceptions = [] for res in getaddrinfo(host, port, 0, SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket(af, socktype, proto) if timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) > sock.connect(sa) E TimeoutError: timed out /usr/lib64/python3.11/socket.py:836: TimeoutError During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request( self, request: Request, ) -> Response: assert isinstance(request.stream, SyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = self._pool.handle_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:218: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: response = connection.handle_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. 
with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() self._attempt_to_acquire_connection(status) except BaseException as exc: self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: > response = connection.handle_request(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: stream = self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import HTTP2Connection self._connection = HTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = HTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: > stream = self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def _connect(self, request: Request) -> NetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": 
self._local_address, "timeout": timeout, } with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = '1.1.1.1', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:85: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = , value = TimeoutError('timed out') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectTimeout: timed out /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectTimeout The above exception was the direct cause of the following exception: self = def test_build_url_from_ip(self): self.assertTrue(resolver_v4_addresses or resolver_v6_addresses) if resolver_v4_addresses: nameserver_ip = random.choice(resolver_v4_addresses) q = dns.message.make_query("example.com.", dns.rdatatype.A) # For some reason Google's DNS over HTTPS fails when you POST to # https://8.8.8.8/dns-query # So we're just going to do GET requests here > r = dns.query.https( q, nameserver_ip, session=self.session, post=False, timeout=4 ) tests/test_doh.py:198: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:423: in https response = session.get( /usr/lib/python3.11/site-packages/httpx/_client.py:1039: in get return self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:815: in request return self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:902: in send response = self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:930: in _send_handling_auth response = self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:967: in _send_handling_redirects response = self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1003: in _send_single_request response = transport.handle_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:217: in handle_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # 
noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectTimeout: timed out /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectTimeout __________________ DNSOverHTTPSTestCaseHttpx.test_get_request __________________ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} with map_exceptions(exc_map): > sock = socket.create_connection( address, timeout, source_address=source_address ) /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('dns.google', 443), timeout = 4, source_address = None def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, all_errors=False): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. When a connection cannot be created, raises the last error if *all_errors* is False, and an ExceptionGroup of all errors if *all_errors* is True. """ host, port = address exceptions = [] > for res in getaddrinfo(host, port, 0, SOCK_STREAM): /usr/lib64/python3.11/socket.py:827: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'dns.google', port = 443, family = 0, type = proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. 
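All of the httpx/httpcore failures in this run bottom out in the same call: `socket.getaddrinfo()` cannot resolve the resolver hostnames inside the isolated build root, so it raises `gaierror(-3, 'Temporary failure in name resolution')` (EAI_AGAIN), which httpcore then maps to `ConnectError`. A small sketch of checking for that condition directly; the hostname is illustrative:

    import socket

    host = "cloudflare-dns.com"  # any external name behaves the same here
    try:
        infos = socket.getaddrinfo(host, 443, type=socket.SOCK_STREAM)
        print("resolved:", sorted({info[4][0] for info in infos}))
    except socket.gaierror as exc:
        # Errno -3 (EAI_AGAIN) is "Temporary failure in name resolution",
        # exactly the error threaded through the tracebacks in this log.
        print("no usable DNS:", exc)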
""" # We override this function since we want to translate the numeric family # and socket type values to enum constants. addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request( self, request: Request, ) -> Response: assert isinstance(request.stream, SyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = self._pool.handle_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:218: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: response = connection.handle_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() self._attempt_to_acquire_connection(status) except BaseException as exc: self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." 
) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: > response = connection.handle_request(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: stream = self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import HTTP2Connection self._connection = HTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = HTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: > stream = self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def _connect(self, request: Request) -> NetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": self._local_address, "timeout": timeout, } with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'dns.google', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = def test_get_request(self): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) > r = dns.query.https( q, nameserver_url, session=self.session, post=False, timeout=4 ) tests/test_doh.py:164: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:423: in https response = session.get( /usr/lib/python3.11/site-packages/httpx/_client.py:1039: in get return self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:815: in request return self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:902: in send response = self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:930: in _send_handling_auth response = self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:967: in _send_handling_redirects response = self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1003: in _send_single_request response = transport.handle_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:217: in handle_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. 
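httpx's `map_httpcore_exceptions()` above converts the httpcore error into the most specific matching httpx exception, so a failed name lookup becomes `httpx.ConnectError` and a TCP connect timeout becomes `httpx.ConnectTimeout`; both derive from `httpx.TransportError`, which is the practical base class to catch around a DoH request. A hedged sketch (the endpoint, header, and the pre-encoded example.com/A query in the "dns" parameter are illustrative):

    import httpx

    try:
        with httpx.Client(timeout=4) as client:
            resp = client.get(
                "https://dns.google/dns-query",
                params={"dns": "AAABAAABAAAAAAAAB2V4YW1wbGUDY29tAAABAAE"},
                headers={"accept": "application/dns-message"},
            )
            print(resp.status_code, len(resp.content))
    except httpx.ConnectTimeout:
        print("TCP connect timed out")
    except httpx.ConnectError as exc:
        print("could not connect (e.g. no DNS):", exc)
    except httpx.TransportError as exc:
        print("other transport-level failure:", exc)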
if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError _______________ DNSOverHTTPSTestCaseHttpx.test_get_request_http1 _______________ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} with map_exceptions(exc_map): > sock = socket.create_connection( address, timeout, source_address=source_address ) /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('cloudflare-dns.com', 443), timeout = 4, source_address = None def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, all_errors=False): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. When a connection cannot be created, raises the last error if *all_errors* is False, and an ExceptionGroup of all errors if *all_errors* is True. """ host, port = address exceptions = [] > for res in getaddrinfo(host, port, 0, SOCK_STREAM): /usr/lib64/python3.11/socket.py:827: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'cloudflare-dns.com', port = 443, family = 0 type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. 
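The failing tests all funnel through `dns.query.https()`, which ships the wire-format query either as the body of an HTTP POST (`post=True`, the requests-based cases earlier in this log) or base64url-encoded into the `dns` GET parameter (`post=False`, the httpx cases here). A minimal sketch of both call forms as the tests invoke them; it needs working outbound HTTPS, which this build root does not have:

    import dns.message
    import dns.query
    import dns.rdatatype

    q = dns.message.make_query("example.com.", dns.rdatatype.A)
    url = "https://cloudflare-dns.com/dns-query"

    # POST: the binary DNS message is the request body
    # (content-type: application/dns-message, as seen in the headers above).
    r_post = dns.query.https(q, url, post=True, timeout=4)

    # GET: the same message is base64url-encoded into the "dns" query parameter.
    r_get = dns.query.https(q, url, post=False, timeout=4)

    print(r_post.rcode(), r_get.rcode())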
addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request( self, request: Request, ) -> Response: assert isinstance(request.stream, SyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = self._pool.handle_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:218: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: response = connection.handle_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() self._attempt_to_acquire_connection(status) except BaseException as exc: self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." 
) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: > response = connection.handle_request(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: stream = self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import HTTP2Connection self._connection = HTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = HTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: > stream = self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def _connect(self, request: Request) -> NetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": self._local_address, "timeout": timeout, } with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:85: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 
'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = def test_get_request_http1(self): saved_have_http2 = dns.query._have_http2 try: dns.query._have_http2 = False nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) > r = dns.query.https( q, nameserver_url, session=self.session, post=False, timeout=4 ) tests/test_doh.py:175: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:423: in https response = session.get( /usr/lib/python3.11/site-packages/httpx/_client.py:1039: in get return self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:815: in request return self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:902: in send response = self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:930: in _send_handling_auth response = self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:967: in _send_handling_redirects response = self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1003: in _send_single_request response = transport.handle_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:217: in handle_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. 
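`test_get_request_http1` above temporarily clears the module-level `dns.query._have_http2` flag so the HTTPS transport is built without HTTP/2 support, exercising the `HTTP11Connection` branch of the httpcore connection code quoted earlier. When driving httpx directly, the equivalent knob is the client constructor; a sketch, assuming the optional `h2` package is installed when HTTP/2 is requested:

    import httpx

    # HTTP/1.1 only: ALPN never offers "h2", so HTTP11Connection is used.
    h1_client = httpx.Client(http1=True, http2=False, timeout=4)

    # Allow HTTP/2 when the server negotiates it via ALPN.
    h2_client = httpx.Client(http1=True, http2=True, timeout=4)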
if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError __________________ DNSOverHTTPSTestCaseHttpx.test_new_session __________________ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} with map_exceptions(exc_map): > sock = socket.create_connection( address, timeout, source_address=source_address ) /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('cloudflare-dns.com', 443), timeout = 4, source_address = None def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, all_errors=False): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. When a connection cannot be created, raises the last error if *all_errors* is False, and an ExceptionGroup of all errors if *all_errors* is True. """ host, port = address exceptions = [] > for res in getaddrinfo(host, port, 0, SOCK_STREAM): /usr/lib64/python3.11/socket.py:827: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'cloudflare-dns.com', port = 443, family = 0 type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. 
addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request( self, request: Request, ) -> Response: assert isinstance(request.stream, SyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = self._pool.handle_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:218: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: response = connection.handle_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() self._attempt_to_acquire_connection(status) except BaseException as exc: self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." 
) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: > response = connection.handle_request(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: stream = self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import HTTP2Connection self._connection = HTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = HTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: > stream = self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def _connect(self, request: Request) -> NetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": self._local_address, "timeout": timeout, } with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:85: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 
'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = def test_new_session(self): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) > r = dns.query.https(q, nameserver_url, timeout=4) tests/test_doh.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:412: in https response = session.post( /usr/lib/python3.11/site-packages/httpx/_client.py:1130: in post return self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:815: in request return self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:902: in send response = self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:930: in _send_handling_auth response = self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:967: in _send_handling_redirects response = self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1003: in _send_single_request response = transport.handle_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:217: in handle_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. 
if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError __________________ DNSOverHTTPSTestCaseHttpx.test_padded_get ___________________ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} with map_exceptions(exc_map): > sock = socket.create_connection( address, timeout, source_address=source_address ) /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('cloudflare-dns.com', 443), timeout = 4, source_address = None def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, all_errors=False): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. When a connection cannot be created, raises the last error if *all_errors* is False, and an ExceptionGroup of all errors if *all_errors* is True. """ host, port = address exceptions = [] > for res in getaddrinfo(host, port, 0, SOCK_STREAM): /usr/lib64/python3.11/socket.py:827: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'cloudflare-dns.com', port = 443, family = 0 type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. 
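The DNSOverHTTPSTestCaseHttpx failures are all the same network error surfacing through the same call. With the httpx-based DoH support installed and working DNS, the queries these tests issue look like the sketch below; the resolver URL is just one of the anycast URLs the test module picks from at random, so treat it as an example.

import dns.message
import dns.query
import dns.rdatatype

# Plain A query over DNS-over-HTTPS, as in test_get_request_http1 /
# test_new_session (GET when post=False, POST otherwise).
q = dns.message.make_query("example.com.", dns.rdatatype.A)
r = dns.query.https(q, "https://cloudflare-dns.com/dns-query", post=False, timeout=4)
print(r.answer)

# Padded variant, as in test_padded_get: EDNS(0) with a 128-byte pad option.
q_padded = dns.message.make_query("example.com.", dns.rdatatype.A, use_edns=0, pad=128)
r_padded = dns.query.https(q_padded, "https://cloudflare-dns.com/dns-query", timeout=4)
print(r_padded.answer)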
addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request( self, request: Request, ) -> Response: assert isinstance(request.stream, SyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = self._pool.handle_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:218: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: response = connection.handle_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() self._attempt_to_acquire_connection(status) except BaseException as exc: self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." 
) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: > response = connection.handle_request(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: stream = self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import HTTP2Connection self._connection = HTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = HTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: > stream = self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def _connect(self, request: Request) -> NetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": self._local_address, "timeout": timeout, } with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:85: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 
'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = def test_padded_get(self): nameserver_url = random.choice(KNOWN_PAD_AWARE_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A, use_edns=0, pad=128) > r = dns.query.https( q, nameserver_url, session=self.session, post=False, timeout=4 ) tests/test_doh.py:251: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:423: in https response = session.get( /usr/lib/python3.11/site-packages/httpx/_client.py:1039: in get return self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:815: in request return self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:902: in send response = self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:930: in _send_handling_auth response = self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:967: in _send_handling_redirects response = self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1003: in _send_single_request response = transport.handle_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:217: in handle_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. 
if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError _________________ DNSOverHTTPSTestCaseHttpx.test_post_request __________________ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} with map_exceptions(exc_map): > sock = socket.create_connection( address, timeout, source_address=source_address ) /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ address = ('cloudflare-dns.com', 443), timeout = 4, source_address = None def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, all_errors=False): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. When a connection cannot be created, raises the last error if *all_errors* is False, and an ExceptionGroup of all errors if *all_errors* is True. """ host, port = address exceptions = [] > for res in getaddrinfo(host, port, 0, SOCK_STREAM): /usr/lib64/python3.11/socket.py:827: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ host = 'cloudflare-dns.com', port = 443, family = 0 type = , proto = 0, flags = 0 def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. 
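The underlying error in every one of these tracebacks comes from the very first step, name resolution, and can be reproduced with nothing but the standard library; on a host with working DNS the call simply succeeds.

import socket

try:
    # This is the getaddrinfo() call at the bottom of each traceback above.
    infos = socket.getaddrinfo("cloudflare-dns.com", 443, 0, socket.SOCK_STREAM)
    print(infos[0][4])
except socket.gaierror as exc:
    # In a build environment without DNS this yields EAI_AGAIN (-3):
    # "Temporary failure in name resolution".
    print(exc.errno, exc)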
addrlist = [] > for res in _socket.getaddrinfo(host, port, family, type, proto, flags): E socket.gaierror: [Errno -3] Temporary failure in name resolution /usr/lib64/python3.11/socket.py:962: gaierror During handling of the above exception, another exception occurred: @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: > yield /usr/lib/python3.11/site-packages/httpx/_transports/default.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request( self, request: Request, ) -> Response: assert isinstance(request.stream, SyncByteStream) req = httpcore.Request( method=request.method, url=httpcore.URL( scheme=request.url.raw_scheme, host=request.url.raw_host, port=request.url.port, target=request.url.raw_path, ), headers=request.headers.raw, content=request.stream, extensions=request.extensions, ) with map_httpcore_exceptions(): > resp = self._pool.handle_request(req) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:218: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." ) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: response = connection.handle_request(request) except ConnectionNotAvailable: # The ConnectionNotAvailable exception is a special case, that # indicates we need to retry the request on a new connection. # # The most common case where this can occur is when multiple # requests are queued waiting for a single connection, which # might end up as an HTTP/2 connection, but which actually ends # up as HTTP/1.1. with self._pool_lock: # Maintain our position in the request queue, but reset the # status so that the request becomes queued again. status.unset_connection() self._attempt_to_acquire_connection(status) except BaseException as exc: self.response_closed(status) > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:253: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = request = def handle_request(self, request: Request) -> Response: """ Send an HTTP request, and return an HTTP response. This is the core implementation that is called into by `.request()` or `.stream()`. """ scheme = request.url.scheme.decode() if scheme == "": raise UnsupportedProtocol( "Request URL is missing an 'http://' or 'https://' protocol." ) if scheme not in ("http", "https"): raise UnsupportedProtocol( f"Request URL has an unsupported protocol '{scheme}://'." 
) status = RequestStatus(request) with self._pool_lock: self._requests.append(status) self._close_expired_connections() self._attempt_to_acquire_connection(status) while True: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("pool", None) try: connection = status.wait_for_connection(timeout=timeout) except BaseException as exc: # If we timeout here, or if the task is cancelled, then make # sure to remove the request from the queue before bubbling # up the exception. with self._pool_lock: self._requests.remove(status) raise exc try: > response = connection.handle_request(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection_pool.py:237: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: stream = self._connect(request) ssl_object = stream.get_extra_info("ssl_object") http2_negotiated = ( ssl_object is not None and ssl_object.selected_alpn_protocol() == "h2" ) if http2_negotiated or (self._http2 and not self._http1): from .http2 import HTTP2Connection self._connection = HTTP2Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) else: self._connection = HTTP11Connection( origin=self._origin, stream=stream, keepalive_expiry=self._keepalive_expiry, ) except Exception as exc: self._connect_failed = True > raise exc /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:86: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def handle_request(self, request: Request) -> Response: if not self.can_handle_request(request.url.origin): raise RuntimeError( f"Attempted to send request to {request.url.origin} on connection to {self._origin}" ) with self._request_lock: if self._connection is None: try: > stream = self._connect(request) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:63: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , request = def _connect(self, request: Request) -> NetworkStream: timeouts = request.extensions.get("timeout", {}) timeout = timeouts.get("connect", None) retries_left = self._retries delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) while True: try: if self._uds is None: kwargs = { "host": self._origin.host.decode("ascii"), "port": self._origin.port, "local_address": self._local_address, "timeout": timeout, } with Trace( "connection.connect_tcp", request, kwargs ) as trace: > stream = self._network_backend.connect_tcp(**kwargs) /usr/lib/python3.11/site-packages/httpcore/_sync/connection.py:111: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = host = 'cloudflare-dns.com', port = 443, timeout = 4, local_address = None def connect_tcp( self, host: str, port: int, timeout: typing.Optional[float] = None, local_address: typing.Optional[str] = None, ) -> NetworkStream: address = (host, port) source_address = None if local_address is None else (local_address, 0) exc_map = {socket.timeout: ConnectTimeout, socket.error: ConnectError} > with map_exceptions(exc_map): /usr/lib/python3.11/site-packages/httpcore/backends/sync.py:85: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = typ = value = gaierror(-3, 
'Temporary failure in name resolution') traceback = def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: # Need to force instantiation so we can reliably # tell if we get the same exception back value = typ() try: > self.gen.throw(typ, value, traceback) /usr/lib64/python3.11/contextlib.py:155: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ map = {: , : } @contextlib.contextmanager def map_exceptions(map: Dict[Type, Type]) -> Iterator[None]: try: yield except Exception as exc: # noqa: PIE786 for from_exc, to_exc in map.items(): if isinstance(exc, from_exc): > raise to_exc(exc) E httpcore.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpcore/_exceptions.py:12: ConnectError The above exception was the direct cause of the following exception: self = def test_post_request(self): nameserver_url = random.choice(KNOWN_ANYCAST_DOH_RESOLVER_URLS) q = dns.message.make_query("example.com.", dns.rdatatype.A) > r = dns.query.https( q, nameserver_url, session=self.session, post=True, timeout=4 ) tests/test_doh.py:185: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:412: in https response = session.post( /usr/lib/python3.11/site-packages/httpx/_client.py:1130: in post return self.request( /usr/lib/python3.11/site-packages/httpx/_client.py:815: in request return self.send(request, auth=auth, follow_redirects=follow_redirects) /usr/lib/python3.11/site-packages/httpx/_client.py:902: in send response = self._send_handling_auth( /usr/lib/python3.11/site-packages/httpx/_client.py:930: in _send_handling_auth response = self._send_handling_redirects( /usr/lib/python3.11/site-packages/httpx/_client.py:967: in _send_handling_redirects response = self._send_single_request(request) /usr/lib/python3.11/site-packages/httpx/_client.py:1003: in _send_single_request response = transport.handle_request(request) /usr/lib/python3.11/site-packages/httpx/_transports/default.py:217: in handle_request with map_httpcore_exceptions(): /usr/lib64/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @contextlib.contextmanager def map_httpcore_exceptions() -> typing.Iterator[None]: try: yield except Exception as exc: # noqa: PIE-786 mapped_exc = None for from_exc, to_exc in HTTPCORE_EXC_MAP.items(): if not isinstance(exc, from_exc): continue # We want to map to the most specific exception we can find. # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to # `httpx.ReadTimeout`, not just `httpx.TimeoutException`. 
if mapped_exc is None or issubclass(to_exc, mapped_exc): mapped_exc = to_exc if mapped_exc is None: # pragma: nocover raise message = str(exc) > raise mapped_exc(message) from exc E httpx.ConnectError: [Errno -3] Temporary failure in name resolution /usr/lib/python3.11/site-packages/httpx/_transports/default.py:77: ConnectError ___________________ DNSOverHTTPSTestCaseHttpx.test_resolver ____________________ self = def test_resolver(self): res = dns.resolver.Resolver(configure=False) res.nameservers = ["https://dns.google/dns-query"] > answer = res.resolve("dns.google", "A") tests/test_doh.py:243: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1204: in resolve timeout = self._compute_timeout(start, lifetime, resolution.errors) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = start = 1681629512.2720428, lifetime = 5.0 errors = [('https://dns.google/dns-query', False, 53, ConnectError('[Errno -3] Temporary failure in name resolution'), None), (...e), ('https://dns.google/dns-query', False, 53, ConnectError('[Errno -3] Temporary failure in name resolution'), None)] def _compute_timeout( self, start: float, lifetime: Optional[float] = None, errors: Optional[List[ErrorTuple]] = None, ) -> float: lifetime = self.lifetime if lifetime is None else lifetime now = time.time() duration = now - start if errors is None: errors = [] if duration < 0: if duration < -1: # Time going backwards is bad. Just give up. raise LifetimeTimeout(timeout=duration, errors=errors) else: # Time went backwards, but only a little. This can # happen, e.g. under vmware with older linux kernels. # Pretend it didn't happen. duration = 0 if duration >= lifetime: > raise LifetimeTimeout(timeout=duration, errors=errors) E dns.resolver.LifetimeTimeout: The resolution lifetime expired after 5.878 seconds: Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution; Server https://dns.google/dns-query UDP port 53 answered [Errno -3] Temporary failure in name resolution dns/resolver.py:988: LifetimeTimeout ___________________________ QueryTests.testQueryTLS ____________________________ self = @unittest.skipUnless(have_ssl, "No SSL support") def testQueryTLS(self): for address in query_addresses: qname = dns.name.from_text("dns.google.") q = dns.message.make_query(qname, dns.rdatatype.A) > response = dns.query.tls(q, address, timeout=2) tests/test_query.py:134: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:1047: in tls _connect(s, destination, expiration) dns/query.py:873: in _connect _wait_for_writable(s, expiration) dns/query.py:182: in _wait_for_writable _wait_for(s, False, True, True, expiration) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ fd = , readable = False writable = True, _ = True, expiration = 1681629531.5324867 def _wait_for(fd, readable, writable, _, expiration): # Use the selected selector class to wait for any of the specified # events. An "expiration" absolute time is converted into a relative # timeout. 
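test_resolver fails differently from the other DoH tests: the resolver keeps retrying the DoH nameserver until its lifetime budget is spent, then raises LifetimeTimeout carrying each attempt's error. A small sketch of the same resolver configuration, built entirely in code so it does not depend on /etc/resolv.conf:

import dns.resolver

res = dns.resolver.Resolver(configure=False)
res.nameservers = ["https://dns.google/dns-query"]
try:
    # lifetime is the total time budget across all retries (5 s by default).
    answer = res.resolve("dns.google", "A", lifetime=5.0)
    print([rr.address for rr in answer])
except dns.resolver.LifetimeTimeout as exc:
    # Mirrors the failure above: one entry per failed attempt, then timeout.
    print(exc)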
# # The unused parameter is 'error', which is always set when # selecting for read or write, and we have no error-only selects. if readable and isinstance(fd, ssl.SSLSocket) and fd.pending() > 0: return True sel = _selector_class() events = 0 if readable: events |= selectors.EVENT_READ if writable: events |= selectors.EVENT_WRITE if events: sel.register(fd, events) if expiration is None: timeout = None else: timeout = expiration - time.time() if timeout <= 0.0: raise dns.exception.Timeout if not sel.select(timeout): > raise dns.exception.Timeout E dns.exception.Timeout: The DNS operation timed out. dns/query.py:154: Timeout ______________________ QueryTests.testQueryTLSWithSocket _______________________ self = @unittest.skipUnless(have_ssl, "No SSL support") def testQueryTLSWithSocket(self): for address in query_addresses: with socket.socket( dns.inet.af_for_address(address), socket.SOCK_STREAM ) as base_s: ll = dns.inet.low_level_address_tuple((address, 853)) base_s.settimeout(2) > base_s.connect(ll) E TimeoutError: timed out tests/test_query.py:151: TimeoutError ______________________ QueryTests.testQueryTLSwithPadding ______________________ self = @unittest.skipUnless(have_ssl, "No SSL support") def testQueryTLSwithPadding(self): for address in query_addresses: qname = dns.name.from_text("dns.google.") q = dns.message.make_query(qname, dns.rdatatype.A, use_edns=0, pad=128) > response = dns.query.tls(q, address, timeout=2) tests/test_query.py:174: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/query.py:1047: in tls _connect(s, destination, expiration) dns/query.py:873: in _connect _wait_for_writable(s, expiration) dns/query.py:182: in _wait_for_writable _wait_for(s, False, True, True, expiration) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ fd = , readable = False writable = True, _ = True, expiration = 1681629536.4288583 def _wait_for(fd, readable, writable, _, expiration): # Use the selected selector class to wait for any of the specified # events. An "expiration" absolute time is converted into a relative # timeout. # # The unused parameter is 'error', which is always set when # selecting for read or write, and we have no error-only selects. if readable and isinstance(fd, ssl.SSLSocket) and fd.pending() > 0: return True sel = _selector_class() events = 0 if readable: events |= selectors.EVENT_READ if writable: events |= selectors.EVENT_WRITE if events: sel.register(fd, events) if expiration is None: timeout = None else: timeout = expiration - time.time() if timeout <= 0.0: raise dns.exception.Timeout if not sel.select(timeout): > raise dns.exception.Timeout E dns.exception.Timeout: The DNS operation timed out. dns/query.py:154: Timeout ___________________ LiveResolverTests.testCanonicalNameCNAME ___________________ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
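The three QueryTests failures above are plain connect timeouts: DNS-over-TLS needs a TCP connection to port 853 before any DNS message is exchanged. With network access the same call looks like the sketch below; 8.8.8.8 is used only as an example address, while the tests loop over a list of query_addresses.

import dns.message
import dns.name
import dns.query
import dns.rdatatype

# Equivalent of testQueryTLS: TLS-wrapped TCP to port 853 (the default),
# carrying an ordinary DNS query.
q = dns.message.make_query(dns.name.from_text("dns.google."), dns.rdatatype.A)
response = dns.query.tls(q, "8.8.8.8", timeout=2)
print(response.answer)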
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameCNAME(self): name = dns.name.from_text("www.dnspython.org") cname = dns.name.from_text("dmfrjf4ips8xa.cloudfront.net") > self.assertEqual(dns.resolver.canonical_name(name), cname) tests/test_resolver.py:727: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1431: in canonical_name return get_default_resolver().canonical_name(name) dns/resolver.py:1331: in get_default_resolver reset_default_resolver() dns/resolver.py:1344: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _________________ LiveResolverTests.testCanonicalNameDangling __________________ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = @unittest.skipIf(_systemd_resolved_present, "systemd-resolved in use") def testCanonicalNameDangling(self): name = dns.name.from_text("dangling-cname.dnspython.org") cname = dns.name.from_text("dangling-target.dnspython.org") > self.assertEqual(dns.resolver.canonical_name(name), cname) tests/test_resolver.py:733: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1431: in canonical_name return get_default_resolver().canonical_name(name) dns/resolver.py:1331: in get_default_resolver reset_default_resolver() dns/resolver.py:1344: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration __________________ LiveResolverTests.testCanonicalNameNoCNAME __________________ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameNoCNAME(self): cname = dns.name.from_text("www.google.com") > self.assertEqual(dns.resolver.canonical_name("www.google.com"), cname) tests/test_resolver.py:722: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1431: in canonical_name return get_default_resolver().canonical_name(name) dns/resolver.py:1331: in get_default_resolver reset_default_resolver() dns/resolver.py:1344: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ________________________ LiveResolverTests.testResolve _________________________ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testResolve(self): > answer = dns.resolver.resolve("dns.google.", "A") tests/test_resolver.py:651: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1368: in resolve return get_default_resolver().resolve( dns/resolver.py:1331: in get_default_resolver reset_default_resolver() dns/resolver.py:1344: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """
        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
_____________________ LiveResolverTests.testResolveAddress _____________________

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """
        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def testResolveAddress(self):
>       answer = dns.resolver.resolve_address("8.8.8.8")

tests/test_resolver.py:663:
dns/resolver.py:1421: in resolve_address
    return get_default_resolver().resolve_address(ipaddr, *args, **kwargs)
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
___________________ LiveResolverTests.testResolveEdnsOptions ___________________

    @patch.object(dns.message.Message, "use_edns")
    def testResolveEdnsOptions(self, message_use_edns_mock):
>       resolver = dns.resolver.Resolver()

tests/test_resolver.py:669:
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
____________________ LiveResolverTests.testResolveNXDOMAIN _____________________

    def testResolveNXDOMAIN(self):
        qname = dns.name.from_text("nxdomain.dnspython.org")
        qclass = dns.rdataclass.from_text("IN")
        qtype = dns.rdatatype.from_text("A")
        def bad():
            answer = dns.resolver.resolve(qname, qtype)
        try:
>           dns.resolver.resolve(qname, qtype)

tests/test_resolver.py:702:
dns/resolver.py:1368: in resolve
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
__________________ LiveResolverTests.testResolveNodataAnswer ___________________

    def testResolveNodataAnswer(self):
        qname = dns.name.from_text("dnspython.org")
        qclass = dns.rdataclass.from_text("IN")
        qtype = dns.rdatatype.from_text("SRV")
>       answer = dns.resolver.resolve(qname, qtype, raise_on_no_answer=False)

tests/test_resolver.py:685:
dns/resolver.py:1368: in resolve
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
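All of the failures above share one root cause: the mock chroot has no /etc/resolv.conf, so constructing dns.resolver.Resolver() raises NoResolverConfiguration out of read_resolv_conf(). The following minimal sketch is not part of the build log; it assumes only the dnspython API visible in these tracebacks (Resolver(filename=..., configure=...), read_resolv_conf() accepting a file-like object, and NoResolverConfiguration) and shows both how the failure reproduces and how a resolver can be configured without touching /etc/resolv.conf.

    # Minimal sketch, not part of the build log.  Assumes the dnspython API
    # shown in the tracebacks: Resolver(filename=..., configure=...),
    # read_resolv_conf() accepting a file-like object, NoResolverConfiguration.
    import io

    import dns.resolver

    # 1. Reproducing the failure mode: pointing the Resolver at a missing file
    #    turns the OSError from open() into NoResolverConfiguration.
    try:
        dns.resolver.Resolver(filename="/path/that/does/not/exist/resolv.conf")
    except dns.resolver.NoResolverConfiguration as exc:
        print("no resolver configuration:", exc)

    # 2. Avoiding it: skip the automatic /etc/resolv.conf read (configure=False)
    #    and feed read_resolv_conf() an in-memory file instead ("otherwise it is
    #    treated as the file itself", per the docstring echoed in the traceback).
    resolver = dns.resolver.Resolver(configure=False)
    resolver.read_resolv_conf(io.StringIO("nameserver 192.0.2.1\nsearch example.org\n"))
    print(resolver.nameservers)  # expected: ['192.0.2.1']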
_________________ LiveResolverTests.testResolveNodataException _________________

    def testResolveNodataException(self):
        def bad():
            dns.resolver.resolve("dnspython.org.", "SRV")
>       self.assertRaises(dns.resolver.NoAnswer, bad)

tests/test_resolver.py:679:
tests/test_resolver.py:677: in bad
dns/resolver.py:1368: in resolve
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
_______________________ LiveResolverTests.testResolveTCP _______________________

    def testResolveTCP(self):
>       answer = dns.resolver.resolve("dns.google.", "A", tcp=True)

tests/test_resolver.py:657:
dns/resolver.py:1368: in resolve
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
______________________ LiveResolverTests.testZoneForName1 ______________________

    def testZoneForName1(self):
        name = dns.name.from_text("www.dnspython.org.")
        ezname = dns.name.from_text("dnspython.org.")
>       zname = dns.resolver.zone_for_name(name)

tests/test_resolver.py:629:
dns/resolver.py:1469: in zone_for_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
______________________ LiveResolverTests.testZoneForName2 ______________________

    def testZoneForName2(self):
        name = dns.name.from_text("a.b.www.dnspython.org.")
        ezname = dns.name.from_text("dnspython.org.")
>       zname = dns.resolver.zone_for_name(name)

tests/test_resolver.py:635:
dns/resolver.py:1469: in zone_for_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
______________________ LiveResolverTests.testZoneForName3 ______________________

    def testZoneForName3(self):
        ezname = dns.name.from_text("dnspython.org.")
>       zname = dns.resolver.zone_for_name("dnspython.org.")

tests/test_resolver.py:640:
dns/resolver.py:1469: in zone_for_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
______________________ LiveResolverTests.testZoneForName4 ______________________

    def testZoneForName4(self):
        def bad():
            name = dns.name.from_text("dnspython.org", None)
            dns.resolver.zone_for_name(name)
>       self.assertRaises(dns.resolver.NotAbsolute, bad)

tests/test_resolver.py:648:
tests/test_resolver.py:646: in bad
dns/resolver.py:1469: in zone_for_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
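Every chained traceback above runs through the same module-level path: resolve()/resolve_address()/canonical_name()/zone_for_name() call get_default_resolver(), which calls reset_default_resolver(), which constructs the Resolver() that reads /etc/resolv.conf. The sketch below is illustrative only; it assumes that dns.resolver.default_resolver is the module-level default the reset_default_resolver() frame assigns to, and shows how pre-seeding it avoids the implicit /etc/resolv.conf read.

    # Illustrative sketch only; leans on the call chain visible above
    # (resolve -> get_default_resolver -> reset_default_resolver -> Resolver())
    # and assumes dns.resolver.default_resolver is the module-level default.
    import dns.resolver

    # Build a resolver explicitly instead of letting reset_default_resolver()
    # construct one (which would read /etc/resolv.conf and fail in this chroot).
    seeded = dns.resolver.Resolver(configure=False)
    seeded.nameservers = ["192.0.2.53"]  # placeholder nameserver for the sketch

    # Install it as the process-wide default so module-level helpers such as
    # dns.resolver.resolve() and dns.resolver.resolve_address() reuse it.
    dns.resolver.default_resolver = seeded

    print(dns.resolver.get_default_resolver() is seeded)  # expected: True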
________________ SelectResolverTestCase.testCanonicalNameCNAME _________________

    def testCanonicalNameCNAME(self):
        name = dns.name.from_text("www.dnspython.org")
        cname = dns.name.from_text("dmfrjf4ips8xa.cloudfront.net")
>       self.assertEqual(dns.resolver.canonical_name(name), cname)

tests/test_resolver.py:727:
dns/resolver.py:1431: in canonical_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
_______________ SelectResolverTestCase.testCanonicalNameDangling _______________

    @unittest.skipIf(_systemd_resolved_present, "systemd-resolved in use")
    def testCanonicalNameDangling(self):
        name = dns.name.from_text("dangling-cname.dnspython.org")
        cname = dns.name.from_text("dangling-target.dnspython.org")
>       self.assertEqual(dns.resolver.canonical_name(name), cname)

tests/test_resolver.py:733:
dns/resolver.py:1431: in canonical_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
_______________ SelectResolverTestCase.testCanonicalNameNoCNAME ________________

    def testCanonicalNameNoCNAME(self):
        cname = dns.name.from_text("www.google.com")
>       self.assertEqual(dns.resolver.canonical_name("www.google.com"), cname)

tests/test_resolver.py:722:
dns/resolver.py:1431: in canonical_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
______________________ SelectResolverTestCase.testResolve ______________________

    def testResolve(self):
>       answer = dns.resolver.resolve("dns.google.", "A")

tests/test_resolver.py:651:
dns/resolver.py:1368: in resolve
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
__________________ SelectResolverTestCase.testResolveAddress ___________________

    def testResolveAddress(self):
>       answer = dns.resolver.resolve_address("8.8.8.8")

tests/test_resolver.py:663:
dns/resolver.py:1421: in resolve_address
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
________________ SelectResolverTestCase.testResolveEdnsOptions _________________

    @patch.object(dns.message.Message, "use_edns")
    def testResolveEdnsOptions(self, message_use_edns_mock):
>       resolver = dns.resolver.Resolver()

tests/test_resolver.py:669:
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
__________________ SelectResolverTestCase.testResolveNXDOMAIN __________________

    def testResolveNXDOMAIN(self):
        qname = dns.name.from_text("nxdomain.dnspython.org")
        qclass = dns.rdataclass.from_text("IN")
        qtype = dns.rdatatype.from_text("A")
        def bad():
            answer = dns.resolver.resolve(qname, qtype)
        try:
>           dns.resolver.resolve(qname, qtype)

tests/test_resolver.py:702:
dns/resolver.py:1368: in resolve
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
________________ SelectResolverTestCase.testResolveNodataAnswer ________________

    def testResolveNodataAnswer(self):
        qname = dns.name.from_text("dnspython.org")
        qclass = dns.rdataclass.from_text("IN")
        qtype = dns.rdatatype.from_text("SRV")
>       answer = dns.resolver.resolve(qname, qtype, raise_on_no_answer=False)

tests/test_resolver.py:685:
dns/resolver.py:1368: in resolve
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
______________ SelectResolverTestCase.testResolveNodataException _______________

    def testResolveNodataException(self):
        def bad():
            dns.resolver.resolve("dnspython.org.", "SRV")
>       self.assertRaises(dns.resolver.NoAnswer, bad)

tests/test_resolver.py:679:
tests/test_resolver.py:677: in bad
dns/resolver.py:1368: in resolve
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
____________________ SelectResolverTestCase.testResolveTCP _____________________

    def testResolveTCP(self):
>       answer = dns.resolver.resolve("dns.google.", "A", tcp=True)

tests/test_resolver.py:657:
dns/resolver.py:1368: in resolve
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
___________________ SelectResolverTestCase.testZoneForName1 ____________________

    def testZoneForName1(self):
        name = dns.name.from_text("www.dnspython.org.")
        ezname = dns.name.from_text("dnspython.org.")
>       zname = dns.resolver.zone_for_name(name)

tests/test_resolver.py:629:
dns/resolver.py:1469: in zone_for_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
___________________ SelectResolverTestCase.testZoneForName2 ____________________

    def testZoneForName2(self):
        name = dns.name.from_text("a.b.www.dnspython.org.")
        ezname = dns.name.from_text("dnspython.org.")
>       zname = dns.resolver.zone_for_name(name)

tests/test_resolver.py:635:
dns/resolver.py:1469: in zone_for_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
___________________ SelectResolverTestCase.testZoneForName3 ____________________

    def testZoneForName3(self):
        ezname = dns.name.from_text("dnspython.org.")
>       zname = dns.resolver.zone_for_name("dnspython.org.")

tests/test_resolver.py:640:
dns/resolver.py:1469: in zone_for_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
___________________ SelectResolverTestCase.testZoneForName4 ____________________

    def testZoneForName4(self):
        def bad():
            name = dns.name.from_text("dnspython.org", None)
            dns.resolver.zone_for_name(name)
>       self.assertRaises(dns.resolver.NotAbsolute, bad)

tests/test_resolver.py:648:
tests/test_resolver.py:646: in bad
dns/resolver.py:1469: in zone_for_name
dns/resolver.py:861: in __init__
E   dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf
dns/resolver.py:911: NoResolverConfiguration
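These are all live, network-dependent test cases, and the suite already skips one of them conditionally (the @unittest.skipIf(_systemd_resolved_present, ...) decorator on testCanonicalNameDangling). The sketch below applies the same guard style to a build environment that has no /etc/resolv.conf at all; the _have_resolv_conf flag and the GuardedLiveResolverTests class are hypothetical and not taken from tests/test_resolver.py.

    # Hypothetical guard, in the same skipIf style the suite uses for
    # _systemd_resolved_present; _have_resolv_conf and GuardedLiveResolverTests
    # are assumptions for illustration, not code from tests/test_resolver.py.
    import os
    import unittest

    import dns.resolver

    _have_resolv_conf = os.path.exists("/etc/resolv.conf")


    class GuardedLiveResolverTests(unittest.TestCase):
        @unittest.skipIf(not _have_resolv_conf, "no /etc/resolv.conf in this environment")
        def testResolve(self):
            # Same query as the testResolve cases in the log above.
            answer = dns.resolver.resolve("dns.google.", "A")
            self.assertIsNotNone(answer.rrset)


    if __name__ == "__main__":
        unittest.main()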
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameCNAME(self): name = dns.name.from_text("www.dnspython.org") cname = dns.name.from_text("dmfrjf4ips8xa.cloudfront.net") > self.assertEqual(dns.resolver.canonical_name(name), cname) tests/test_resolver.py:727: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1431: in canonical_name return get_default_resolver().canonical_name(name) dns/resolver.py:1331: in get_default_resolver reset_default_resolver() dns/resolver.py:1344: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ________________ PollResolverTestCase.testCanonicalNameDangling ________________ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = @unittest.skipIf(_systemd_resolved_present, "systemd-resolved in use") def testCanonicalNameDangling(self): name = dns.name.from_text("dangling-cname.dnspython.org") cname = dns.name.from_text("dangling-target.dnspython.org") > self.assertEqual(dns.resolver.canonical_name(name), cname) tests/test_resolver.py:733: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1431: in canonical_name return get_default_resolver().canonical_name(name) dns/resolver.py:1331: in get_default_resolver reset_default_resolver() dns/resolver.py:1344: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ________________ PollResolverTestCase.testCanonicalNameNoCNAME _________________ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testCanonicalNameNoCNAME(self): cname = dns.name.from_text("www.google.com") > self.assertEqual(dns.resolver.canonical_name("www.google.com"), cname) tests/test_resolver.py:722: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1431: in canonical_name return get_default_resolver().canonical_name(name) dns/resolver.py:1331: in get_default_resolver reset_default_resolver() dns/resolver.py:1344: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _______________________ PollResolverTestCase.testResolve _______________________ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testResolve(self): > answer = dns.resolver.resolve("dns.google.", "A") tests/test_resolver.py:651: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1368: in resolve return get_default_resolver().resolve( dns/resolver.py:1331: in get_default_resolver reset_default_resolver() dns/resolver.py:1344: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 
Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration ___________________ PollResolverTestCase.testResolveAddress ____________________ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: > cm: contextlib.AbstractContextManager = open(f) E FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf' dns/resolver.py:908: FileNotFoundError During handling of the above exception, another exception occurred: self = def testResolveAddress(self): > answer = dns.resolver.resolve_address("8.8.8.8") tests/test_resolver.py:663: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ dns/resolver.py:1421: in resolve_address return get_default_resolver().resolve_address(ipaddr, *args, **kwargs) dns/resolver.py:1331: in get_default_resolver reset_default_resolver() dns/resolver.py:1344: in reset_default_resolver default_resolver = Resolver() dns/resolver.py:861: in __init__ self.read_resolv_conf(filename) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. Interprets the following items: - nameserver - name server IP address - domain - local domain name - search - search list for host-name lookup - options - supported options are rotate, timeout, edns0, and ndots """ if isinstance(f, str): try: cm: contextlib.AbstractContextManager = open(f) except OSError: # /etc/resolv.conf doesn't exist, can't be read, etc. > raise NoResolverConfiguration(f"cannot open {f}") E dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf dns/resolver.py:911: NoResolverConfiguration _________________ PollResolverTestCase.testResolveEdnsOptions __________________ self = , f = '/etc/resolv.conf' def read_resolv_conf(self, f: Any) -> None: """Process *f* as a file in the /etc/resolv.conf format. If f is a ``str``, it is used as the name of the file to open; otherwise it is treated as the file itself. 

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =
message_use_edns_mock =

    @patch.object(dns.message.Message, "use_edns")
    def testResolveEdnsOptions(self, message_use_edns_mock):
>       resolver = dns.resolver.Resolver()

tests/test_resolver.py:669:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
___________________ PollResolverTestCase.testResolveNXDOMAIN ___________________

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def testResolveNXDOMAIN(self):
        qname = dns.name.from_text("nxdomain.dnspython.org")
        qclass = dns.rdataclass.from_text("IN")
        qtype = dns.rdatatype.from_text("A")
        def bad():
            answer = dns.resolver.resolve(qname, qtype)
        try:
>           dns.resolver.resolve(qname, qtype)

tests/test_resolver.py:702:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
dns/resolver.py:1368: in resolve
    return get_default_resolver().resolve(
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
_________________ PollResolverTestCase.testResolveNodataAnswer _________________

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def testResolveNodataAnswer(self):
        qname = dns.name.from_text("dnspython.org")
        qclass = dns.rdataclass.from_text("IN")
        qtype = dns.rdatatype.from_text("SRV")
>       answer = dns.resolver.resolve(qname, qtype, raise_on_no_answer=False)

tests/test_resolver.py:685:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
dns/resolver.py:1368: in resolve
    return get_default_resolver().resolve(
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
_______________ PollResolverTestCase.testResolveNodataException ________________

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def testResolveNodataException(self):
        def bad():
            dns.resolver.resolve("dnspython.org.", "SRV")
>       self.assertRaises(dns.resolver.NoAnswer, bad)

tests/test_resolver.py:679:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_resolver.py:677: in bad
    dns.resolver.resolve("dnspython.org.", "SRV")
dns/resolver.py:1368: in resolve
    return get_default_resolver().resolve(
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
_____________________ PollResolverTestCase.testResolveTCP ______________________

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def testResolveTCP(self):
>       answer = dns.resolver.resolve("dns.google.", "A", tcp=True)

tests/test_resolver.py:657:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
dns/resolver.py:1368: in resolve
    return get_default_resolver().resolve(
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
____________________ PollResolverTestCase.testZoneForName1 _____________________

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def testZoneForName1(self):
        name = dns.name.from_text("www.dnspython.org.")
        ezname = dns.name.from_text("dnspython.org.")
>       zname = dns.resolver.zone_for_name(name)

tests/test_resolver.py:629:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
dns/resolver.py:1469: in zone_for_name
    resolver = get_default_resolver()
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
____________________ PollResolverTestCase.testZoneForName2 _____________________

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def testZoneForName2(self):
        name = dns.name.from_text("a.b.www.dnspython.org.")
        ezname = dns.name.from_text("dnspython.org.")
>       zname = dns.resolver.zone_for_name(name)

tests/test_resolver.py:635:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
dns/resolver.py:1469: in zone_for_name
    resolver = get_default_resolver()
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
____________________ PollResolverTestCase.testZoneForName3 _____________________

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def testZoneForName3(self):
        ezname = dns.name.from_text("dnspython.org.")
>       zname = dns.resolver.zone_for_name("dnspython.org.")

tests/test_resolver.py:640:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
dns/resolver.py:1469: in zone_for_name
    resolver = get_default_resolver()
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
____________________ PollResolverTestCase.testZoneForName4 _____________________

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def testZoneForName4(self):
        def bad():
            name = dns.name.from_text("dnspython.org", None)
            dns.resolver.zone_for_name(name)
>       self.assertRaises(dns.resolver.NotAbsolute, bad)

tests/test_resolver.py:648:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_resolver.py:646: in bad
    dns.resolver.zone_for_name(name)
dns/resolver.py:1469: in zone_for_name
    resolver = get_default_resolver()
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
____________ OverrideSystemResolverTestCase.test_basic_getaddrinfo _____________

self =

    @unittest.skipIf(
        sys.platform == "win32", "avoid windows original getaddrinfo issues"
    )
    def test_basic_getaddrinfo(self):
        self.assertTrue(
>           self.equivalent("dns.google", 53, socket.AF_INET, socket.SOCK_DGRAM)
        )

tests/test_resolver_override.py:97:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_resolver_override.py:89: in equivalent
    b = dns.resolver._original_getaddrinfo(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

host = 'dns.google', port = 53, family = type = , proto = 0, flags = 0

    def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
        """Resolve host and port into list of address info entries.

        Translate the host/port argument into a sequence of 5-tuples that
        contain all the necessary arguments for creating a socket connected
        to that service. host is a domain name, a string representation of an
        IPv4/v6 address or None. port is a string service name such as 'http',
        a numeric port number or None. By passing None as the value of host
        and port, you can pass NULL to the underlying C API.

        The family, type and proto arguments can be optionally specified in
        order to narrow the list of addresses returned. Passing zero as a
        value for each of these arguments selects the full range of results.
        """
        # We override this function since we want to translate the numeric family
        # and socket type values to enum constants.
        addrlist = []
>       for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
E       socket.gaierror: [Errno -3] Temporary failure in name resolution

/usr/lib64/python3.11/socket.py:962: gaierror
______________ OverrideSystemResolverTestCase.test_gethostbyaddr _______________

self =

    def test_gethostbyaddr(self):
>       a = dns.resolver._original_gethostbyaddr("8.8.8.8")
E       socket.herror: [Errno 2] Host name lookup failure

tests/test_resolver_override.py:193: herror
_______ OverrideSystemResolverUsingDefaultResolverTestCase.test_override _______

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
>               cm: contextlib.AbstractContextManager = open(f)
E               FileNotFoundError: [Errno 2] No such file or directory: '/etc/resolv.conf'

dns/resolver.py:908: FileNotFoundError

During handling of the above exception, another exception occurred:

self =

    def setUp(self):
        self.res = FakeResolver()
>       dns.resolver.override_system_resolver()

tests/test_resolver_override.py:242:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
dns/resolver.py:1762: in override_system_resolver
    resolver = get_default_resolver()
dns/resolver.py:1331: in get_default_resolver
    reset_default_resolver()
dns/resolver.py:1344: in reset_default_resolver
    default_resolver = Resolver()
dns/resolver.py:861: in __init__
    self.read_resolv_conf(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , f = '/etc/resolv.conf'

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.

        If f is a ``str``, it is used as the name of the file to open;
        otherwise it is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address
        - domain - local domain name
        - search - search list for host-name lookup
        - options - supported options are rotate, timeout, edns0, and ndots
        """

        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
>               raise NoResolverConfiguration(f"cannot open {f}")
E               dns.resolver.NoResolverConfiguration: cannot open /etc/resolv.conf

dns/resolver.py:911: NoResolverConfiguration
=============================== warnings summary ===============================
../../../../usr/lib/python3.11/site-packages/httpx/_models.py:1
  /usr/lib/python3.11/site-packages/httpx/_models.py:1: DeprecationWarning: 'cgi' is deprecated and slated for removal in Python 3.13
    import cgi

tests/test_async.py::TrioAsyncTests::testDOHGetRequest
  /usr/lib/python3.11/site-packages/anyio/_backends/_trio.py:126: TrioDeprecationWarning: trio.MultiError is deprecated since Trio 0.22.0; use BaseExceptionGroup (on Python 3.11 and later) or exceptiongroup.BaseExceptionGroup (earlier versions) instead (https://github.com/python-trio/trio/issues/2211)
    class ExceptionGroup(BaseExceptionGroup, trio.MultiError):

-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
=========================== short test summary info ============================
FAILED tests/test_async.py::AsyncTests::testCanonicalNameCNAME - dns.resolver...
FAILED tests/test_async.py::AsyncTests::testCanonicalNameDangling - dns.resol...
FAILED tests/test_async.py::AsyncTests::testCanonicalNameNoCNAME - dns.resolv...
FAILED tests/test_async.py::AsyncTests::testDOHGetRequest - httpx.ConnectErro...
FAILED tests/test_async.py::AsyncTests::testDOHGetRequestHttp1 - httpx.Connec...
FAILED tests/test_async.py::AsyncTests::testDOHPostRequest - httpx.ConnectErr...
FAILED tests/test_async.py::AsyncTests::testQueryTLS - dns.exception.Timeout:...
FAILED tests/test_async.py::AsyncTests::testQueryTLSWithSocket - dns.exceptio...
FAILED tests/test_async.py::AsyncTests::testResolve - dns.resolver.NoResolver...
FAILED tests/test_async.py::AsyncTests::testResolveAddress - dns.resolver.NoR...
FAILED tests/test_async.py::AsyncTests::testResolverDOH - dns.resolver.Lifeti...
FAILED tests/test_async.py::AsyncTests::testZoneForName1 - dns.resolver.NoRes...
FAILED tests/test_async.py::AsyncTests::testZoneForName2 - dns.resolver.NoRes...
FAILED tests/test_async.py::AsyncTests::testZoneForName3 - dns.resolver.NoRes...
FAILED tests/test_async.py::AsyncTests::testZoneForName4 - dns.resolver.NoRes...
FAILED tests/test_async.py::TrioAsyncTests::testCanonicalNameCNAME - dns.reso...
FAILED tests/test_async.py::TrioAsyncTests::testCanonicalNameDangling - dns.r...
FAILED tests/test_async.py::TrioAsyncTests::testCanonicalNameNoCNAME - dns.re...
FAILED tests/test_async.py::TrioAsyncTests::testDOHGetRequest - httpx.Connect...
FAILED tests/test_async.py::TrioAsyncTests::testDOHGetRequestHttp1 - httpx.Co...
FAILED tests/test_async.py::TrioAsyncTests::testDOHPostRequest - httpx.Connec...
FAILED tests/test_async.py::TrioAsyncTests::testQueryTLS - trio.ClosedResourc...
FAILED tests/test_async.py::TrioAsyncTests::testQueryTLSWithSocket - trio.Clo...
FAILED tests/test_async.py::TrioAsyncTests::testResolve - dns.resolver.NoReso...
FAILED tests/test_async.py::TrioAsyncTests::testResolveAddress - dns.resolver...
FAILED tests/test_async.py::TrioAsyncTests::testResolverDOH - dns.resolver.Li...
FAILED tests/test_async.py::TrioAsyncTests::testZoneForName1 - dns.resolver.N...
FAILED tests/test_async.py::TrioAsyncTests::testZoneForName2 - dns.resolver.N...
FAILED tests/test_async.py::TrioAsyncTests::testZoneForName3 - dns.resolver.N...
FAILED tests/test_async.py::TrioAsyncTests::testZoneForName4 - dns.resolver.N...
FAILED tests/test_async.py::CurioAsyncTests::testCanonicalNameCNAME - dns.res...
FAILED tests/test_async.py::CurioAsyncTests::testCanonicalNameDangling - dns....
FAILED tests/test_async.py::CurioAsyncTests::testCanonicalNameNoCNAME - dns.r...
FAILED tests/test_async.py::CurioAsyncTests::testQueryTLS - UnboundLocalError...
FAILED tests/test_async.py::CurioAsyncTests::testQueryTLSWithSocket - Unbound...
FAILED tests/test_async.py::CurioAsyncTests::testResolve - dns.resolver.NoRes...
FAILED tests/test_async.py::CurioAsyncTests::testResolveAddress - dns.resolve...
FAILED tests/test_async.py::CurioAsyncTests::testZoneForName1 - dns.resolver....
FAILED tests/test_async.py::CurioAsyncTests::testZoneForName2 - dns.resolver....
FAILED tests/test_async.py::CurioAsyncTests::testZoneForName3 - dns.resolver....
FAILED tests/test_async.py::CurioAsyncTests::testZoneForName4 - dns.resolver....
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseRequests::test_build_url_from_ip
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseRequests::test_get_request - re...
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseRequests::test_new_session - ht...
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseRequests::test_post_request - r...
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseRequests::test_resolver - dns.r...
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseHttpx::test_build_url_from_ip
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseHttpx::test_get_request - httpx...
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseHttpx::test_get_request_http1
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseHttpx::test_new_session - httpx...
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseHttpx::test_padded_get - httpx....
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseHttpx::test_post_request - http...
FAILED tests/test_doh.py::DNSOverHTTPSTestCaseHttpx::test_resolver - dns.reso...
FAILED tests/test_query.py::QueryTests::testQueryTLS - dns.exception.Timeout:...
FAILED tests/test_query.py::QueryTests::testQueryTLSWithSocket - TimeoutError...
FAILED tests/test_query.py::QueryTests::testQueryTLSwithPadding - dns.excepti...
FAILED tests/test_resolver.py::LiveResolverTests::testCanonicalNameCNAME - dn...
FAILED tests/test_resolver.py::LiveResolverTests::testCanonicalNameDangling
FAILED tests/test_resolver.py::LiveResolverTests::testCanonicalNameNoCNAME - ...
FAILED tests/test_resolver.py::LiveResolverTests::testResolve - dns.resolver....
FAILED tests/test_resolver.py::LiveResolverTests::testResolveAddress - dns.re...
FAILED tests/test_resolver.py::LiveResolverTests::testResolveEdnsOptions - dn...
FAILED tests/test_resolver.py::LiveResolverTests::testResolveNXDOMAIN - dns.r...
FAILED tests/test_resolver.py::LiveResolverTests::testResolveNodataAnswer - d...
FAILED tests/test_resolver.py::LiveResolverTests::testResolveNodataException
FAILED tests/test_resolver.py::LiveResolverTests::testResolveTCP - dns.resolv...
FAILED tests/test_resolver.py::LiveResolverTests::testZoneForName1 - dns.reso...
FAILED tests/test_resolver.py::LiveResolverTests::testZoneForName2 - dns.reso...
FAILED tests/test_resolver.py::LiveResolverTests::testZoneForName3 - dns.reso...
FAILED tests/test_resolver.py::LiveResolverTests::testZoneForName4 - dns.reso...
FAILED tests/test_resolver.py::SelectResolverTestCase::testCanonicalNameCNAME
FAILED tests/test_resolver.py::SelectResolverTestCase::testCanonicalNameDangling
FAILED tests/test_resolver.py::SelectResolverTestCase::testCanonicalNameNoCNAME
FAILED tests/test_resolver.py::SelectResolverTestCase::testResolve - dns.reso...
FAILED tests/test_resolver.py::SelectResolverTestCase::testResolveAddress - d...
FAILED tests/test_resolver.py::SelectResolverTestCase::testResolveEdnsOptions
FAILED tests/test_resolver.py::SelectResolverTestCase::testResolveNXDOMAIN - ...
FAILED tests/test_resolver.py::SelectResolverTestCase::testResolveNodataAnswer
FAILED tests/test_resolver.py::SelectResolverTestCase::testResolveNodataException
FAILED tests/test_resolver.py::SelectResolverTestCase::testResolveTCP - dns.r...
FAILED tests/test_resolver.py::SelectResolverTestCase::testZoneForName1 - dns...
FAILED tests/test_resolver.py::SelectResolverTestCase::testZoneForName2 - dns...
FAILED tests/test_resolver.py::SelectResolverTestCase::testZoneForName3 - dns...
FAILED tests/test_resolver.py::SelectResolverTestCase::testZoneForName4 - dns...
FAILED tests/test_resolver.py::PollResolverTestCase::testCanonicalNameCNAME
FAILED tests/test_resolver.py::PollResolverTestCase::testCanonicalNameDangling
FAILED tests/test_resolver.py::PollResolverTestCase::testCanonicalNameNoCNAME
FAILED tests/test_resolver.py::PollResolverTestCase::testResolve - dns.resolv...
FAILED tests/test_resolver.py::PollResolverTestCase::testResolveAddress - dns...
FAILED tests/test_resolver.py::PollResolverTestCase::testResolveEdnsOptions
FAILED tests/test_resolver.py::PollResolverTestCase::testResolveNXDOMAIN - dn...
FAILED tests/test_resolver.py::PollResolverTestCase::testResolveNodataAnswer
FAILED tests/test_resolver.py::PollResolverTestCase::testResolveNodataException
FAILED tests/test_resolver.py::PollResolverTestCase::testResolveTCP - dns.res...
FAILED tests/test_resolver.py::PollResolverTestCase::testZoneForName1 - dns.r...
FAILED tests/test_resolver.py::PollResolverTestCase::testZoneForName2 - dns.r...
FAILED tests/test_resolver.py::PollResolverTestCase::testZoneForName3 - dns.r...
FAILED tests/test_resolver.py::PollResolverTestCase::testZoneForName4 - dns.r...
FAILED tests/test_resolver_override.py::OverrideSystemResolverTestCase::test_basic_getaddrinfo
FAILED tests/test_resolver_override.py::OverrideSystemResolverTestCase::test_gethostbyaddr
FAILED tests/test_resolver_override.py::OverrideSystemResolverUsingDefaultResolverTestCase::test_override
===== 101 failed, 1215 passed, 7 skipped, 2 warnings in 346.37s (0:05:46) ======
RPM build errors:
error: Bad exit status from /var/tmp/rpm-tmp.nPrBvy (%check)
    Bad exit status from /var/tmp/rpm-tmp.nPrBvy (%check)
Child return code was: 1
EXCEPTION: [Error()]
Traceback (most recent call last):
  File "/usr/lib/python3.10/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
  File "/usr/lib/python3.10/site-packages/mockbuild/util.py", line 598, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed:
 # bash --login -c /usr/bin/rpmbuild -ba --noprep --target noarch --nodeps /builddir/build/SPECS/python-dns.spec
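
Most of the resolver failures above are dns.resolver.NoResolverConfiguration errors: Resolver() tries to parse /etc/resolv.conf, which does not exist inside the mock chroot. A minimal sketch of how calling code can sidestep that dependency, assuming dnspython's configure=False constructor flag; the 8.8.8.8 nameserver is only an illustrative value, and the query still needs outbound network access that the chroot also lacks:

    import dns.resolver

    # Skip reading /etc/resolv.conf entirely and configure the resolver by hand.
    resolver = dns.resolver.Resolver(configure=False)
    resolver.nameservers = ["8.8.8.8"]  # illustrative upstream server only

    # This addresses only the NoResolverConfiguration half of the failures;
    # the DoH/TLS tests additionally need working outbound connectivity.
    answer = resolver.resolve("dnspython.org", "A")
    for rdata in answer:
        print(rdata.to_text())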
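The %check failure itself comes from running the network-dependent suites in an offline build root. One way to express that constraint, sketched here as a hypothetical conftest.py addition; the reachability probe and the nodeid-based filter are assumptions for illustration, not part of the upstream test suite or the spec file:

    import os
    import socket

    import pytest


    def _network_available() -> bool:
        # Cheap reachability probe; any failure is treated as "offline build root".
        try:
            socket.getaddrinfo("dns.google", 53)
            return True
        except OSError:
            return False


    OFFLINE = not os.path.exists("/etc/resolv.conf") or not _network_available()


    def pytest_collection_modifyitems(config, items):
        # Mark the live-network test modules as skipped when the sandbox
        # provides neither /etc/resolv.conf nor outbound DNS.
        if not OFFLINE:
            return
        skip = pytest.mark.skip(reason="no /etc/resolv.conf or network in build root")
        for item in items:
            if "test_resolver" in item.nodeid or "test_doh" in item.nodeid:
                item.add_marker(skip)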