Mock Version: 5.0 ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2226377-52909/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1705017600 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.src.rpm Child return code was: 0 ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2226377-52909/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1704931200 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.hl9cOY + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf h2-4.1.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/h2-4.1.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd h2-4.1.0 + rm -rf /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/mkdir -p /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
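The unpack commands above and the patch applications that follow are the standard expansion of RPM's %autosetup machinery (rpmuncompress plus patch with the distribution's default flags). The spec file itself is not reproduced in this log; its %prep section plausibly amounts to the following reconstruction, not the actual spec:

    %prep
    # -p1 matches the `patch -p1` invocations in the log; -n names the unpacked directory
    %autosetup -p1 -n h2-%{version}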
+ /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fedora-tox-adjustments.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fix-repr-checks-for-Python-3.11.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.R9HEpw + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir 
/builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement not satisfied: wheel Exiting dependency generation pass: build backend + cat /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv '*.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2226377-52909/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1704931200 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.clZjec + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf h2-4.1.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/h2-4.1.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd h2-4.1.0 + rm -rf /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/mkdir -p /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
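The return code of 11 above is not an error: rpmbuild -br deliberately stops after %generate_buildrequires and exits 11 once it has written the *.buildreqs.nosrc.rpm, signalling that newly discovered BuildRequires may need installing. Mock installs them into the chroot (logged in root.log) and reruns the pass. A simplified shell sketch of one iteration of that cycle (the real logic lives in mock's Python code, and mock drives the installs through its own package-manager plumbing rather than a literal dnf call):

    # one iteration of the dynamic-BuildRequires cycle, roughly:
    rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec   # exits 11 after writing buildreqs
    dnf builddep -y /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm       # install what the pass requested
    # ...repeat until a pass discovers nothing new, then run the full build:
    rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec

Each pass gets further than the last: the first stopped at the missing wheel package; the pass starting above stops at tox-current-env, the one after that at pytest and hypothesis, until every requirement is satisfied.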
+ /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fedora-tox-adjustments.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fix-repr-checks-for-Python-3.11.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.U0mtEe + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir 
/builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info creating src/h2.egg-info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt writing manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) Handling tox-current-env >= 0.0.6 from tox itself Requirement not satisfied: tox-current-env >= 0.0.6 Exiting dependency generation pass: tox itself + cat /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv '*.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2226377-52909/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1704931200 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.sbCqyO + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf h2-4.1.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/h2-4.1.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd h2-4.1.0 + rm -rf /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/mkdir -p /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
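The -t flag passed to pyproject_buildrequires.py asks tox, not just the build backend, for the test dependencies, which is why RPM_TOXENV=py312 is exported above. That query needs the tox-current-env plugin, so the pass that just ended stopped at tox-current-env >= 0.0.6. Once the plugin is installed, the next pass can interrogate tox directly; a rough hand-run equivalent (a sketch, mirroring the "tox --print-deps-only: py312" label in the log):

    RPM_TOXENV=py312 tox -e py312 --print-deps-only

The dependencies tox prints for the py312 environment (pytest>=6.0.1 and hypothesis>=5.5,<7, from the project's tox configuration) then become BuildRequires of their own in the following pass.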
+ /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fedora-tox-adjustments.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fix-repr-checks-for-Python-3.11.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.iIu9Wb + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir 
/builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info creating src/h2.egg-info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt writing manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) Handling tox-current-env >= 0.0.6 from tox itself Requirement satisfied: tox-current-env >= 0.0.6 (installed: tox-current-env 0.0.11) py312: OK (3.92 seconds) congratulations :) (25.43 seconds) Handling tox from tox --print-deps-only: py312 Requirement satisfied: tox (installed: tox 4.11.1) Handling pytest>=6.0.1 from tox --print-deps-only: py312 Requirement not satisfied: pytest>=6.0.1 Handling hypothesis>=5.5,<7 from tox --print-deps-only: py312 Requirement not satisfied: hypothesis>=5.5,<7 running dist_info creating h2.egg-info writing h2.egg-info/PKG-INFO writing dependency_links to h2.egg-info/dependency_links.txt writing requirements to h2.egg-info/requires.txt writing top-level names to h2.egg-info/top_level.txt writing manifest file 'h2.egg-info/SOURCES.txt' reading manifest file 'h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'h2.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/h2-4.1.0/h2-4.1.0.dist-info' Handling hyperframe <7,>=6.0 from hook generated metadata: Requires-Dist (h2) Requirement not satisfied: hyperframe <7,>=6.0 Handling hpack <5,>=4.0 from hook generated metadata: Requires-Dist (h2) Requirement not satisfied: hpack <5,>=4.0 + cat 
/builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv h2-4.1.0.dist-info/ removed 'h2-4.1.0.dist-info/LICENSE' removed 'h2-4.1.0.dist-info/top_level.txt' removed 'h2-4.1.0.dist-info/METADATA' removed directory 'h2-4.1.0.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2226377-52909/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1704931200 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.rZGc1J + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf h2-4.1.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/h2-4.1.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd h2-4.1.0 + rm -rf /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/mkdir -p /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
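Each pass ends by cat-ing the generated requirements file; its contents are not echoed into this log, but the echo statements earlier show the convention: Python specifiers are rendered as RPM python3dist() dependencies. An illustrative shape of the file, using only entries grounded in the log:

    # -pyproject-buildrequires file (illustrative; not captured verbatim above)
    python3dist(setuptools) >= 40.8
    python3dist(wheel)
    python3dist(tox-current-env) >= 0.0.6

The tox-derived pytest and hypothesis requirements, and the runtime hyperframe and hpack requirements discovered from the wheel metadata, are emitted in the same form. The pass starting above is the final -br pass: with those installed, every requirement resolves and mock proceeds to the full rpmbuild -ba --noprep build.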
+ /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fedora-tox-adjustments.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fix-repr-checks-for-Python-3.11.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.OXjy8y + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir 
/builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info creating src/h2.egg-info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt writing manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) Handling tox-current-env >= 0.0.6 from tox itself Requirement satisfied: tox-current-env >= 0.0.6 (installed: tox-current-env 0.0.11) py312: OK (2.59 seconds) congratulations :) (21.92 seconds) Handling tox from tox --print-deps-only: py312 Requirement satisfied: tox (installed: tox 4.11.1) Handling pytest>=6.0.1 from tox --print-deps-only: py312 Requirement satisfied: pytest>=6.0.1 (installed: pytest 7.3.2) Handling hypothesis>=5.5,<7 from tox --print-deps-only: py312 Requirement satisfied: hypothesis>=5.5,<7 (installed: hypothesis 6.82.0) running dist_info creating h2.egg-info writing h2.egg-info/PKG-INFO writing dependency_links to h2.egg-info/dependency_links.txt writing requirements to h2.egg-info/requires.txt writing top-level names to h2.egg-info/top_level.txt writing manifest file 'h2.egg-info/SOURCES.txt' reading manifest file 'h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'h2.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/h2-4.1.0/h2-4.1.0.dist-info' Handling hyperframe <7,>=6.0 from hook generated metadata: Requires-Dist (h2) Requirement satisfied: hyperframe <7,>=6.0 (installed: hyperframe 6.0.1) Handling hpack <5,>=4.0 from hook generated metadata: Requires-Dist 
(h2) Requirement satisfied: hpack <5,>=4.0 (installed: hpack 4.0.0) + cat /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv h2-4.1.0.dist-info/ removed 'h2-4.1.0.dist-info/LICENSE' removed 'h2-4.1.0.dist-info/top_level.txt' removed 'h2-4.1.0.dist-info/METADATA' removed directory 'h2-4.1.0.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2226377-52909/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1704931200 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.Ah2ROe + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export 
LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) Handling tox-current-env >= 0.0.6 from tox itself Requirement satisfied: tox-current-env >= 0.0.6 (installed: tox-current-env 0.0.11) py312: OK (3.37 seconds) congratulations :) (18.20 seconds) Handling tox from tox --print-deps-only: py312 Requirement satisfied: tox (installed: tox 4.11.1) Handling pytest>=6.0.1 from tox --print-deps-only: py312 Requirement satisfied: pytest>=6.0.1 (installed: pytest 7.3.2) Handling hypothesis>=5.5,<7 from tox --print-deps-only: py312 Requirement satisfied: hypothesis>=5.5,<7 (installed: hypothesis 6.82.0) running dist_info writing h2.egg-info/PKG-INFO writing dependency_links to h2.egg-info/dependency_links.txt writing requirements to h2.egg-info/requires.txt writing top-level names to h2.egg-info/top_level.txt reading 
manifest file 'h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'h2.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/h2-4.1.0/h2-4.1.0.dist-info' Handling hyperframe <7,>=6.0 from hook generated metadata: Requires-Dist (h2) Requirement satisfied: hyperframe <7,>=6.0 (installed: hyperframe 6.0.1) Handling hpack <5,>=4.0 from hook generated metadata: Requires-Dist (h2) Requirement satisfied: hpack <5,>=4.0 (installed: hpack 4.0.0) + cat /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv h2-4.1.0.dist-info/ removed 'h2-4.1.0.dist-info/LICENSE' removed 'h2-4.1.0.dist-info/top_level.txt' removed 'h2-4.1.0.dist-info/METADATA' removed directory 'h2-4.1.0.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.fSCRPF + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd h2-4.1.0 + mkdir -p 
/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_wheel.py /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir Processing /builddir/build/BUILD/h2-4.1.0 Preparing metadata (pyproject.toml): started Running command Preparing metadata (pyproject.toml) running dist_info creating /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-lewidt5p/h2.egg-info writing /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-lewidt5p/h2.egg-info/PKG-INFO writing dependency_links to /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-lewidt5p/h2.egg-info/dependency_links.txt writing requirements to /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-lewidt5p/h2.egg-info/requires.txt writing top-level names to /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-lewidt5p/h2.egg-info/top_level.txt writing manifest file '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-lewidt5p/h2.egg-info/SOURCES.txt' reading manifest file '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-lewidt5p/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-lewidt5p/h2.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-lewidt5p/h2-4.1.0.dist-info' Preparing metadata (pyproject.toml): finished with status 'done' Building wheels for collected packages: h2 Building wheel for h2 (pyproject.toml): started Running command Building wheel for h2 (pyproject.toml) running bdist_wheel running build running build_py creating build creating build/lib creating build/lib/h2 copying src/h2/events.py -> build/lib/h2 copying src/h2/utilities.py -> build/lib/h2 copying src/h2/connection.py -> build/lib/h2 copying src/h2/errors.py -> build/lib/h2 copying src/h2/windows.py -> build/lib/h2 copying src/h2/frame_buffer.py -> build/lib/h2 copying src/h2/exceptions.py -> build/lib/h2 copying src/h2/settings.py -> build/lib/h2 copying src/h2/__init__.py -> build/lib/h2 copying src/h2/stream.py -> build/lib/h2 copying src/h2/config.py 
-> build/lib/h2 installing to build/bdist.linux-riscv64/wheel running install running install_lib creating build/bdist.linux-riscv64 creating build/bdist.linux-riscv64/wheel creating build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/events.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/utilities.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/connection.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/errors.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/windows.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/frame_buffer.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/exceptions.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/settings.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/__init__.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/stream.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/config.py -> build/bdist.linux-riscv64/wheel/h2 running install_egg_info running egg_info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Copying src/h2.egg-info to build/bdist.linux-riscv64/wheel/h2-4.1.0-py3.12.egg-info running install_scripts creating build/bdist.linux-riscv64/wheel/h2-4.1.0.dist-info/WHEEL creating '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-wheel-4tduxrtq/.tmp-z7uyrqn6/h2-4.1.0-py3-none-any.whl' and adding 'build/bdist.linux-riscv64/wheel' to it adding 'h2/__init__.py' adding 'h2/config.py' adding 'h2/connection.py' adding 'h2/errors.py' adding 'h2/events.py' adding 'h2/exceptions.py' adding 'h2/frame_buffer.py' adding 'h2/settings.py' adding 'h2/stream.py' adding 'h2/utilities.py' adding 'h2/windows.py' adding 'h2-4.1.0.dist-info/LICENSE' adding 'h2-4.1.0.dist-info/METADATA' adding 'h2-4.1.0.dist-info/WHEEL' adding 'h2-4.1.0.dist-info/top_level.txt' adding 'h2-4.1.0.dist-info/RECORD' removing build/bdist.linux-riscv64/wheel Building wheel for h2 (pyproject.toml): finished with status 'done' Created wheel for h2: filename=h2-4.1.0-py3-none-any.whl size=57464 sha256=899db618433cd9041d1d98b56fc4b904e32fee492961efaeab89a43cbb600271 Stored in directory: /builddir/.cache/pip/wheels/42/0e/c2/d8f850eabf7f97289d30ba1b1585175875c2b090a973af407d Successfully built h2 + PYTHONPATH=/builddir/build/BUILD/h2-4.1.0/build/lib.linux-riscv64-cpython-312 + sphinx-build docs/source html Running Sphinx v7.2.6 making output directory... done loading intersphinx inventory from https://docs.python.org/objects.inv... 
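The intersphinx fetches that follow all fail with name-resolution errors, and that is expected: the ENTER lines at the top of the log show unshare_net=True, meaning rpmbuild runs in a private network namespace with no outside connectivity. The corresponding mock setting, as Fedora's builders configure it:

    # mock configuration equivalent (networking during rpmbuild stays off)
    config_opts['rpmbuild_networking'] = False

Sphinx degrades gracefully when inventories are unreachable, so these warnings are harmless; only cross-project reference links are lost.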
loading intersphinx inventory from https://hpack.readthedocs.io/en/stable/objects.inv... loading intersphinx inventory from https://pyopenssl.readthedocs.org/en/latest/objects.inv... WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://docs.python.org/objects.inv' not fetchable due to : HTTPSConnectionPool(host='docs.python.org', port=443): Max retries exceeded with url: /objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://pyopenssl.readthedocs.org/en/latest/objects.inv' not fetchable due to : HTTPSConnectionPool(host='pyopenssl.readthedocs.org', port=443): Max retries exceeded with url: /en/latest/objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://hpack.readthedocs.io/en/stable/objects.inv' not fetchable due to : HTTPSConnectionPool(host='hpack.readthedocs.io', port=443): Max retries exceeded with url: /en/stable/objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) building [mo]: targets for 0 po files that are out of date writing output... building [html]: targets for 21 source files that are out of date updating environment: [new config] 21 added, 0 changed, 0 removed reading sources... [ 5%] advanced-usage reading sources... [ 10%] api reading sources... [ 14%] asyncio-example reading sources... [ 19%] basic-usage reading sources... [ 24%] curio-example reading sources... [ 29%] eventlet-example reading sources... [ 33%] examples reading sources... [ 38%] gevent-example reading sources... [ 43%] index reading sources... [ 48%] installation reading sources... [ 52%] low-level reading sources... [ 57%] negotiating-http2 reading sources... [ 62%] plain-sockets-example reading sources... [ 67%] release-notes reading sources... [ 71%] release-process reading sources... [ 76%] testimonials reading sources... [ 81%] tornado-example reading sources... [ 86%] twisted-example reading sources... [ 90%] twisted-head-example reading sources... [ 95%] twisted-post-example reading sources... [100%] wsgi-example looking for now-outdated files... none found pickling environment... 
WARNING: autodoc: failed to import class 'connection.H2Connection' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'config.H2Configuration' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.RequestReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.ResponseReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.TrailersReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.InformationalResponseReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.DataReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.WindowUpdated' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.RemoteSettingsChanged' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.PingReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.PingAckReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.StreamEnded' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.StreamReset' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.PushedStreamReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.SettingsAcknowledged' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.PriorityUpdated' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.ConnectionTerminated' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.AlternativeServiceAvailable' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.UnknownFrameReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.H2Error' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.NoSuchStreamError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.StreamClosedError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.RFC1122Error' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.ProtocolError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.FrameTooLargeError' from module 'h2'; the following exception was 
raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.FrameDataMissingError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.TooManyStreamsError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.FlowControlError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.StreamIDTooLowError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.InvalidSettingsValueError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.NoAvailableStreamIDError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.InvalidBodyLengthError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.UnsupportedFrameError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.DenialOfServiceError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import module 'errors' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'settings.SettingCodes' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'settings.Settings' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'settings.ChangedSetting' from module 'h2'; the following exception was raised: No module named 'h2' done checking consistency... done preparing documents... done copying assets... copying static files... done copying extra files... done done writing output... [ 5%] advanced-usage writing output... [ 10%] api writing output... [ 14%] asyncio-example writing output... [ 19%] basic-usage writing output... [ 24%] curio-example writing output... [ 29%] eventlet-example writing output... [ 33%] examples writing output... [ 38%] gevent-example writing output... [ 43%] index writing output... [ 48%] installation writing output... [ 52%] low-level writing output... [ 57%] negotiating-http2 writing output... [ 62%] plain-sockets-example writing output... [ 67%] release-notes writing output... [ 71%] release-process writing output... [ 76%] testimonials writing output... [ 81%] tornado-example writing output... [ 86%] twisted-example writing output... [ 90%] twisted-head-example writing output... [ 95%] twisted-post-example writing output... [100%] wsgi-example generating indices... genindex done highlighting module code... writing additional pages... search done copying images... [ 50%] _static/h2.connection.H2ConnectionStateMachine.dot.png copying images... [100%] _static/h2.stream.H2StreamStateMachine.dot.png dumping search index in English (code: en)... done dumping object inventory... done build succeeded, 41 warnings. The HTML pages are in html. 
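Most of the 41 warnings are the autodoc import failures above, and the cause is visible earlier in the log: sphinx-build was run with PYTHONPATH=/builddir/build/BUILD/h2-4.1.0/build/lib.linux-riscv64-cpython-312, but a pure-Python wheel builds into plain build/lib (see the bdist_wheel output), so the platlib-suffixed directory never exists and `import h2` fails. A likely fix, hedged since the spec is not shown, is to point PYTHONPATH at a directory that actually contains the package, for example the project's src/ layout tree:

    # build the docs against the in-tree sources instead of the platlib-suffixed
    # path that a pure-Python build never creates
    PYTHONPATH=/builddir/build/BUILD/h2-4.1.0/src sphinx-build docs/source html

The docs still build because autodoc failures are warnings rather than errors, but the API pages ship without the class and exception documentation.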
+ rm -rf html/.doctrees html/.buildinfo
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.lP5Leu
+ umask 022
+ cd /builddir/build/BUILD
+ '[' /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch '!=' / ']'
+ rm -rf /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch
++ dirname /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch
+ mkdir -p /builddir/build/BUILDROOT
+ mkdir /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ cd h2-4.1.0
++ ls /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir/h2-4.1.0-py3-none-any.whl
++ xargs basename --multiple
++ sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/'
+ specifier=h2==4.1.0
+ '[' -z h2==4.1.0 ']'
+ TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir
+ /usr/bin/python3 -m pip install --root /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch --prefix /usr --no-deps --disable-pip-version-check --progress-bar off --verbose --ignore-installed --no-warn-script-location --no-index --no-cache-dir --find-links /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir h2==4.1.0
Using pip 23.3.1 from /usr/lib/python3.12/site-packages/pip (python 3.12)
Looking in links: /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir
Processing ./pyproject-wheeldir/h2-4.1.0-py3-none-any.whl
Installing collected packages: h2
Successfully installed h2-4.1.0
+ '[' -d /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/bin ']'
+ rm -f /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-ghost-distinfo
+ site_dirs=()
+ '[' -d /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages ']'
+ site_dirs+=("/usr/lib/python3.12/site-packages")
+ '[' /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib64/python3.12/site-packages '!=' /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages ']'
+ '[' -d /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib64/python3.12/site-packages ']'
+ for site_dir in ${site_dirs[@]}
+ for distinfo in /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch$site_dir/*.dist-info
+ echo '%ghost /usr/lib/python3.12/site-packages/h2-4.1.0.dist-info'
+ sed -i s/pip/rpm/ /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/INSTALLER
+ PYTHONPATH=/usr/lib/rpm/redhat
+ /usr/bin/python3 -B /usr/lib/rpm/redhat/pyproject_preprocess_record.py --buildroot /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch --record /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/RECORD --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-record
+ rm -fv /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/RECORD
removed '/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/RECORD'
+ rm -fv /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/REQUESTED
removed '/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/REQUESTED'
++ wc -l /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-ghost-distinfo
++ cut -f1 '-d '
+ lines=1
+ '[' 1 -ne 1 ']'
+ RPM_PERCENTAGES_COUNT=2
+ /usr/bin/python3 /usr/lib/rpm/redhat/pyproject_save_files.py --output-files /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-files --output-modules /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-modules --buildroot /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch --sitelib /usr/lib/python3.12/site-packages --sitearch /usr/lib64/python3.12/site-packages --python-version 3.12 --pyproject-record /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-record --prefix /usr h2
+ /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 4.1.0-1.fc40 --unique-debug-suffix -4.1.0-1.fc40.noarch --unique-debug-src-base python-h2-4.1.0-1.fc40.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/h2-4.1.0
find-debuginfo: starting
Extracting debug info from 0 files
Creating .debug symlinks for symlinks to ELF files
find: ‘debug’: No such file or directory
find-debuginfo: done
+ /usr/lib/rpm/check-buildroot
+ /usr/lib/rpm/redhat/brp-ldconfig
+ /usr/lib/rpm/brp-compress
+ /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip
+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip
+ /usr/lib/rpm/check-rpaths
+ /usr/lib/rpm/redhat/brp-mangle-shebangs
+ /usr/lib/rpm/brp-remove-la-files
+ env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8
Bytecompiling .py files below /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12 using python3.12
+ /usr/lib/rpm/redhat/brp-python-hardlink
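The ls | xargs basename | sed pipeline above derives the pip requirement specifier from the built wheel's filename (h2-4.1.0-py3-none-any.whl becomes h2==4.1.0). A rough Python equivalent of that sed expression, for illustration only (the macro really does use sed):

import re

def specifier_from_wheel(wheel_name: str) -> str:
    # Mirrors sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/': the first two
    # hyphen-separated fields of a wheel filename are the name and version.
    return re.sub(r"([^-]+)-([^-]+)-.+\.whl", r"\1==\2", wheel_name)

assert specifier_from_wheel("h2-4.1.0-py3-none-any.whl") == "h2==4.1.0"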
Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.wd3JwA
+ umask 022
+ cd /builddir/build/BUILD
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ cd h2-4.1.0
+ TOX_TESTENV_PASSENV='*'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ PATH=/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin
+ PYTHONPATH=/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib64/python3.12/site-packages:/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages
+ PYTHONDONTWRITEBYTECODE=1
+ PYTEST_ADDOPTS=' --ignore=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir'
+ PYTEST_XDIST_AUTO_NUM_WORKERS=8
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -m tox --current-env -q --recreate -e py312
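Note that PATH and PYTHONPATH above point into the buildroot, so the test run below exercises the installed copy of h2 rather than the unpacked source tree. A hypothetical one-line sanity check of which copy gets imported, run under the same environment (not part of the build itself):

# With the PYTHONPATH set above, this should resolve to a path under
# /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/...
import h2
print(h2.__file__)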
============================= test session starts ==============================
platform linux -- Python 3.12.0, pytest-7.3.2, pluggy-1.3.0
cachedir: .tox/py312/.pytest_cache
rootdir: /builddir/build/BUILD/h2-4.1.0
configfile: setup.cfg
testpaths: test
plugins: hypothesis-6.82.0
collected 1433 items

test/test_basic_logic.py ..........................................F.... [ 3%]
........................................................................ [ 8%]
........................................................................ [ 13%]
........................................................................ [ 18%]
........................................................................ [ 23%]
................................................. [ 26%]
test/test_closed_streams.py ..................... [ 28%]
test/test_complex_logic.py ................. [ 29%]
test/test_config.py .................................................... [ 33%]
.............. [ 34%]
test/test_events.py F.F......................................... [ 37%]
test/test_exceptions.py . [ 37%]
test/test_flow_control_window.py ...................................FF.. [ 39%]
...FFF [ 40%]
test/test_h2_upgrade.py ............. [ 41%]
test/test_head_request.py .. [ 41%]
test/test_header_indexing.py ........................................... [ 44%]
........................................................................ [ 49%]
........................................................................ [ 54%]
........................................................................ [ 59%]
......................................... [ 62%]
test/test_informational_responses.py .............................. [ 64%]
test/test_interacting_stacks.py . [ 64%]
test/test_invalid_content_lengths.py ... [ 64%]
test/test_invalid_frame_sequences.py ....................... [ 66%]
test/test_invalid_headers.py ........................................... [ 69%]
........................................................................ [ 74%]
......F.F...F.FFFF.FFF..F...F.FFFF...F.................................. [ 79%]
........................................................................ [ 84%]
........................................................................ [ 89%]
................................. [ 91%]
test/test_priority.py ....................... [ 93%]
test/test_related_events.py ............. [ 94%]
test/test_rfc7838.py ................... [ 95%]
test/test_rfc8441.py . [ 95%]
test/test_settings.py .......................F.FFF.... [ 97%]
test/test_state_machines.py .................. [ 99%]
test/test_stream_reset.py ..... [ 99%]
test/test_utility_functions.py ........ [100%]

=================================== FAILURES ===================================
_________________ TestBasicClient.test_changing_max_frame_size _________________

data = ConjectureData(INTERESTING, 4 bytes, frozen)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
>   result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
>   result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
>   return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
>   return test(*args, **kwargs)

frame_size = 16487
@given(frame_size=integers(min_value=2**14, max_value=(2**24 - 1)))
> @settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
test/test_basic_logic.py:793:

runtime = datetime.timedelta(microseconds=276223)
current_deadline = datetime.timedelta(microseconds=250000)
>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 276.22ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>   raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_changing_max_frame_size(self=, frame_factory=, frame_size=16487) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_changing_max_frame_size(
E       self=,
E       frame_factory=,
E       frame_size=16487,
E   )
E   Unreliable test timings! On an initial run, this test took 276.22ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 9.78 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
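This failure is not a bug in h2 itself: the first Hypothesis example ran slower than the 200 ms per-example deadline on a loaded builder, then passed on replay, which Hypothesis reports as Flaky. A sketch of the remedy the error message itself suggests, written against a hypothetical stand-in test rather than the project's real code:

from hypothesis import HealthCheck, given, settings, strategies as st

# Hedged sketch: disable the per-example deadline so a slow-but-correct run
# on a loaded build machine is not reported as Flaky.
@given(frame_size=st.integers(min_value=2**14, max_value=2**24 - 1))
@settings(deadline=None, suppress_health_check=[HealthCheck.function_scoped_fixture])
def test_changing_max_frame_size_sketch(frame_size):
    assert 2**14 <= frame_size <= 2**24 - 1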
________ TestRemoteSettingsChanged.test_building_settings_from_scratch _________

settings_list = [(0, 32376), (11728, 5786), (13152, 51804), (14660, 38)]
@given(SETTINGS_STRATEGY)
> def test_building_settings_from_scratch(self, settings_list):
test/test_events.py:41:
E   hypothesis.errors.DeadlineExceeded: Test took 328.90ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_building_settings_from_scratch(self=, settings_list=[(0, 32376), (11728, 5786), (13152, 51804), (14660, 38)]) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_building_settings_from_scratch(
E       self=,
E       settings_list=[(0, 32376), (11728, 5786), (13152, 51804), (14660, 38)],
E   )
E   Unreliable test timings! On an initial run, this test took 328.90ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.90 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
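Rather than editing every affected test, a build environment can disable deadlines globally through a Hypothesis settings profile. A sketch of that approach, using a hypothetical conftest.py and profile name (not something this package ships):

# conftest.py -- hypothetical sketch: register and load a profile with
# deadlines disabled, so slow builders do not trip DeadlineExceeded.
from hypothesis import settings

settings.register_profile("rpmbuild", deadline=None)
settings.load_profile("rpmbuild")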
______ TestRemoteSettingsChanged.test_correctly_reports_changed_settings _______

old_settings_list = [(4072, 48543), (63924, 59777), (12052, 29), (17111, 49699), (10323, 906990932), (0, 11275), (21131, 29882), (51873, 4)]
new_settings_list = [(6974, 14826)]
@given(SETTINGS_STRATEGY, SETTINGS_STRATEGY)
> def test_correctly_reports_changed_settings(self, old_settings_list, new_settings_list):
test/test_events.py:77:
E   hypothesis.errors.DeadlineExceeded: Test took 318.34ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_correctly_reports_changed_settings(self=, old_settings_list=[(4072, 48543), (63924, 59777), (12052, 29), (17111, 49699), (10323, 906990932), (0, 11275), (21131, 29882), (51873, 4)], new_settings_list=[(6974, 14826)]) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_correctly_reports_changed_settings(
E       self=,
E       old_settings_list=[(4072, 48543), (63924, 59777), (12052, 29), (17111, 49699), (10323, 906990932), (0, 11275), (21131, 29882), (51873, 4)],
E       new_settings_list=[(6974, 14826)],
E   )
E   Unreliable test timings! On an initial run, this test took 318.34ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.51 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
__________ TestAutomaticFlowControl.test_must_acknowledge_for_stream ___________

stream_id = -70
@given(stream_id=integers(max_value=0))
> @settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
test/test_flow_control_window.py:718:
E   hypothesis.errors.DeadlineExceeded: Test took 320.19ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_must_acknowledge_for_stream(self=, frame_factory=, stream_id=-70) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_must_acknowledge_for_stream(
E       self=,
E       frame_factory=,
E       stream_id=-70,
E   )
E   Unreliable test timings! On an initial run, this test took 320.19ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 8.15 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
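A softer alternative to deadline=None is raising the deadline: settings accepts a timedelta (or a number of milliseconds). A sketch, again against a hypothetical stand-in test:

from datetime import timedelta

from hypothesis import given, settings, strategies as st

# Hedged sketch: allow each example up to 500 ms instead of the 200 ms default.
@given(stream_id=st.integers(max_value=0))
@settings(deadline=timedelta(milliseconds=500))
def test_stream_id_is_nonpositive_sketch(stream_id):
    assert stream_id <= 0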
_______ TestAutomaticFlowControl.test_cannot_acknowledge_less_than_zero ________

size = -11023
@given(size=integers(max_value=-1))
> @settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
test/test_flow_control_window.py:744:
E   hypothesis.errors.DeadlineExceeded: Test took 316.56ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

data = ConjectureData(VALID, 4 bytes, frozen), print_example = True
is_final = True
expected_failure = 
(DeadlineExceeded('Test took 316.56ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_cannot_acknowledge_less_than_zero(self=, frame_factory=, size=-11023) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_cannot_acknowledge_less_than_zero( E self=, E frame_factory=, E size=-11023, E ) E Unreliable test timings! On an initial run, this test took 316.56ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 6.90 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestAutomaticFlowControl.test_acknowledging_1024_bytes_when_empty_increments _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. 
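The locals in these tracebacks show current_deadline = datetime.timedelta(microseconds=250000) against the configured 200ms deadline; that is the `(current_deadline // 4) * 5` line at work, which grants 25% slack on non-final runs. A self-contained check of the arithmetic:

    from datetime import timedelta

    deadline = timedelta(milliseconds=200)   # settings.deadline, as reported in the log
    slackened = (deadline // 4) * 5          # applied when is_final is False
    assert slackened == timedelta(microseconds=250000)
    print(slackened.total_seconds() * 1000)  # 250.0 (ms)

Only the final, shrunk example is re-run against the strict 200ms limit, and DeadlineExceeded always reports the unslackened self.settings.deadline, which is why a run can overrun the 250ms slack yet be reported against a 200.00ms deadline.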
This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) function = .run at 0xffffffb8cff380> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = frame_factory = increment = 1025 @given(integers(min_value=1025, max_value=DEFAULT_FLOW_WINDOW)) > @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) test/test_flow_control_window.py:842: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, , 1025) kwargs = {}, initial_draws = 1, start = 4904693.190249998, result = None finish = 4904693.711422011, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=521172) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 521.17ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = frame_factory = @given(integers(min_value=1025, max_value=DEFAULT_FLOW_WINDOW)) > @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) test/test_flow_control_window.py:842: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 3 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 521.17ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_acknowledging_1024_bytes_when_empty_increments(self=, frame_factory=, increment=1025) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_acknowledging_1024_bytes_when_empty_increments( E self=, E frame_factory=, E increment=1025, E ) E Unreliable test timings! On an initial run, this test took 521.17ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 7.93 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
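Rather than editing each affected test, a packager could relax deadlines suite-wide; Hypothesis supports named settings profiles for this. A sketch, assuming a conftest.py at the test root (the profile name "rpmbuild" is an arbitrary choice for illustration):

    # conftest.py -- loosen Hypothesis timing checks on slow build machines
    from hypothesis import settings

    settings.register_profile("rpmbuild", deadline=None)
    settings.load_profile("rpmbuild")

This leaves any per-test @settings decorators in place; a profile only changes the defaults they inherit.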
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________ TestAutomaticFlowControl.test_connection_only_empty ______________ self = frame_factory = @given(integers(min_value=1025, max_value=(DEFAULT_FLOW_WINDOW // 4) - 1)) > @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) test/test_flow_control_window.py:879: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, , 7024) kwargs = {}, initial_draws = 1, start = 4904722.193315274, result = None finish = 4904722.504613622, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=311298) current_deadline = datetime.timedelta(microseconds=200000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 311.30ms, which exceeds the deadline of 200.00ms E Falsifying example: test_connection_only_empty( E self=, E frame_factory=, E increment=7024, E ) /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded ______________ TestAutomaticFlowControl.test_mixing_update_forms _______________ self = data = ConjectureData(INTERESTING, 3 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
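test_connection_only_empty above fails with a plain DeadlineExceeded rather than Flaky: the overrun reproduced on the final run, where the 25% slack is not applied. The timing proxy quoted throughout this log can be condensed into a standalone sketch (simplified: no draw-time subtraction, and a plain RuntimeError in place of hypothesis.errors.DeadlineExceeded):

    import datetime
    import time

    def run_with_deadline(fn, deadline=datetime.timedelta(milliseconds=200), is_final=False):
        # Mirror the proxy in the log: time one call, slacken the deadline
        # by 25% on non-final runs, and raise if the call overruns it.
        if not is_final:
            deadline = (deadline // 4) * 5
        start = time.perf_counter()
        result = fn()
        runtime = datetime.timedelta(seconds=time.perf_counter() - start)
        if runtime >= deadline:
            raise RuntimeError(
                f"Test took {runtime.total_seconds() * 1000:.2f}ms, which "
                f"exceeds the deadline of {deadline.total_seconds() * 1000:.2f}ms"
            )
        return result

For example, run_with_deadline(lambda: time.sleep(0.3)) raises, while run_with_deadline(lambda: None) returns immediately.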
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) function = .run at 0xffffffb8ceca40> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = frame_factory = increment = 31481 @given(integers(min_value=1025, max_value=DEFAULT_FLOW_WINDOW)) > @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) test/test_flow_control_window.py:923: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, , 31481) kwargs = {}, initial_draws = 1, start = 4904727.831393336, result = None finish = 4904728.308089057, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=476696) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 476.70ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = frame_factory = @given(integers(min_value=1025, max_value=DEFAULT_FLOW_WINDOW)) > @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) test/test_flow_control_window.py:923: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 3 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 476.70ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_mixing_update_forms(self=, frame_factory=, increment=31481) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_mixing_update_forms( E self=, E frame_factory=, E increment=31481, E ) E Unreliable test timings! On an initial run, this test took 476.70ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 10.10 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags0-validate_headers] _ self = data = ConjectureData(INTERESTING, 298 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. 
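Each Flaky report above records a concrete falsifying example (here increment=31481 for test_mixing_update_forms). Hypothesis's @example decorator pins such a value so it is replayed on every run; a sketch with the test body elided, writing the DEFAULT_FLOW_WINDOW bound out as 65535 (the HTTP/2 default initial window -- an assumption about that constant's value):

    from hypothesis import example, given, settings
    from hypothesis.strategies import integers

    @given(increment=integers(min_value=1025, max_value=65535))
    @example(increment=31481)  # replay the falsifying example from this log
    @settings(deadline=None)   # keep timing variance from re-triggering Flaky
    def test_mixing_update_forms_sketch(increment):
        assert 1025 <= increment <= 65535  # stand-in for the real h2 assertions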
tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 298 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 298 bytes, frozen) function = .run at 0xffffffb8cfd8a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 298 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'\x90\xd5vi\xac;K\xb1\xfeFv6\xfa\xe0\xbe', b''), (b'\x90\xd5vi\xac;K\xb1\xfeFv6\xfa\xe0\xbe', b",\xc8\x86'\x90\x80\...6y\x90\x10\xff\x04\xe0"), (b'n\n', b'N\xa0\xcb'), (b'\x11\xa0\x00GB', b''), (b'\xa6!', b'd{'), (b'i\x97', b'SxR'), ...] 
validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'\x90\xd5vi\xac;K\xb1\xfeFv6\xfa\xe0\xbe', b'')...ffffffb9f33ce0>, HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=True)) kwargs = {}, initial_draws = 1, start = 4904872.017489055, result = None finish = 4904872.296540009, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=279051) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 279.05ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 298 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 279.05ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\x90\xd5vi\xac;K\xb1\xfeFv6\xfa\xe0\xbe', b''), E (b'\x90\xd5vi\xac;K\xb1\xfeFv6\xfa\xe0\xbe', E b",\xc8\x86'\x90\x80\x91\xcc\x1f\xa6y\x90\x10\xff\x04\xe0"), E (b'n\n', b'N\xa0\xcb'), E (b'\x11\xa0\x00GB', b''), E (b'\xa6!', b'd{'), E (b'i\x97', b'SxR'), E (b'\xf2\x16', b'-')], validation_function=validate_headers, hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=True)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=True), E headers=[(b'\x90\xd5vi\xac;K\xb1\xfeFv6\xfa\xe0\xbe', b''), E (b'\x90\xd5vi\xac;K\xb1\xfeFv6\xfa\xe0\xbe', E b",\xc8\x86'\x90\x80\x91\xcc\x1f\xa6y\x90\x10\xff\x04\xe0"), E (b'n\n', b'N\xa0\xcb'), E (b'\x11\xa0\x00GB', b''), E (b'\xa6!', b'd{'), E (b'i\x97', b'SxR'), E (b'\xf2\x16', b'-')], E ) E Unreliable test timings! On an initial run, this test took 279.05ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.44 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
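The TestFilter.test_range_of_acceptable_outputs failures are driven by randomly generated byte-string header pairs like the ones printed in the falsifying example above. A sketch of a strategy producing data of that shape (the real strategy lives in test/test_invalid_headers.py and may differ):

    from hypothesis import given, settings
    from hypothesis.strategies import binary, lists, tuples

    byte_headers = lists(tuples(binary(), binary()), min_size=1, max_size=16)

    @given(headers=byte_headers)
    @settings(deadline=None)
    def test_byte_header_shape_sketch(headers):
        # Only checks the generated shape; the real test feeds each pair
        # through h2's header-validation functions.
        assert all(isinstance(name, bytes) and isinstance(value, bytes)
                   for name, value in headers)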
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags1-validate_headers] _ self = data = ConjectureData(INTERESTING, 458 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 458 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 458 bytes, frozen) function = .run at 0xffffffb8e422a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 458 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'\xc7\xc9j\xc6\x92\xbaP\xab\x0bfe\xc5', b'\xff`\xear\x84\xac\x17\x9d\xc1\xec\xfb'), (b'(\x1b\xb2\xacp\x13M~<8\xd2',...fa\xe6\xfe\xed\x8b'), (b'#V\x1d', b'\xe0\xa9&b'), (b'\x86\xae.\xb4\xf8\xce\xd7v|\x8f\xb2\x01', b'KS\xe7\x14\xeb'), ...] 
validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'\xc7\xc9j\xc6\x92\xbaP\xab\x0bfe\xc5', b'\xff`...fffffb9f33ce0>, HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False)) kwargs = {}, initial_draws = 1, start = 4904977.970355833, result = None finish = 4904978.301590878, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=331235) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 331.24ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 458 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 331.24ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\xc7\xc9j\xc6\x92\xbaP\xab\x0bfe\xc5', E b'\xff`\xear\x84\xac\x17\x9d\xc1\xec\xfb'), E (b'(\x1b\xb2\xacp\x13M~<8\xd2', b'\xda>m'), E (b'\xa3', b'\x02\xff\x1d\x8cQT\xe1n\xeb\xa2'), E (b'\xbb\xf0', b'\x12\xfa\xe6\xfe\xed\x8b'), E (b'#V\x1d', b'\xe0\xa9&b'), E (b'\x86\xae.\xb4\xf8\xce\xd7v|\x8f\xb2\x01', b'KS\xe7\x14\xeb'), E (b'"9', b'\xedO\xa1'), E (b'!q\xc5\x86', b'R3\xc0\x02\x00+\xa5\xd0\xae\xa4'), E (b'\xcbD', b'\x9b>\x86'), E (b'\xa0', b'x\x04')], validation_function=validate_headers, hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False), E headers=[(b'\xc7\xc9j\xc6\x92\xbaP\xab\x0bfe\xc5', E b'\xff`\xear\x84\xac\x17\x9d\xc1\xec\xfb'), E (b'(\x1b\xb2\xacp\x13M~<8\xd2', b'\xda>m'), E (b'\xa3', b'\x02\xff\x1d\x8cQT\xe1n\xeb\xa2'), E (b'\xbb\xf0', b'\x12\xfa\xe6\xfe\xed\x8b'), E (b'#V\x1d', b'\xe0\xa9&b'), E (b'\x86\xae.\xb4\xf8\xce\xd7v|\x8f\xb2\x01', b'KS\xe7\x14\xeb'), E (b'"9', b'\xedO\xa1'), E (b'!q\xc5\x86', b'R3\xc0\x02\x00+\xa5\xd0\xae\xa4'), E (b'\xcbD', b'\x9b>\x86'), E (b'\xa0', b'x\x04')], E ) E Unreliable test timings! 
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags3-validate_headers] _

data = ConjectureData(INTERESTING, 87 bytes, frozen)

self = <test_invalid_headers.TestFilter object>
headers = [(b')\xb9\xf37\xec5', b'\xc3dY\x97'), (b'}+Cr,', b'\x96\xb9\xf37\xec5')]
validation_function = <function validate_headers>
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=False, is_push_promise=False)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4905199.953785476, result = None
finish = 4905200.327401309, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=373616)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 373.62ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=<test_invalid_headers.TestFilter object>,
E       validation_function=validate_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=False, is_push_promise=False),
E       headers=[(b')\xb9\xf37\xec5', b'\xc3dY\x97'),
E                (b'}+Cr,', b'\x96\xb9\xf37\xec5')],
E   )
E   Unreliable test timings! On an initial run, this test took 373.62ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.53 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
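A side note on the numbers in these frames: settings.deadline is 200ms, yet current_deadline is 250000 microseconds, because on non-final runs Hypothesis enforces the configured deadline scaled by 5/4 (the `(current_deadline // 4) * 5` line in core.py above). The arithmetic, checked as a self-contained sketch:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)   # settings.deadline
    current_deadline = (deadline // 4) * 5            # non-final runs only
    assert current_deadline == datetime.timedelta(microseconds=250000)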
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags4-validate_headers] _

data = ConjectureData(INTERESTING, 40 bytes, frozen)

self = <test_invalid_headers.TestFilter object>
headers = [(b'\xc6', b'\x92\xd9\x11R'), (b'\x87\xc9\xf8', b'')]
validation_function = <function validate_headers>
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=True)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4905353.828223244, result = None
finish = 4905354.111466517, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=283243)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 283.24ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=<test_invalid_headers.TestFilter object>,
E       validation_function=validate_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=True),
E       headers=[(b'\xc6', b'\x92\xd9\x11R'), (b'\x87\xc9\xf8', b'')],
E   )
E   Unreliable test timings! On an initial run, this test took 283.24ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.47 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags4-validate_outbound_headers] _

self = <test_invalid_headers.TestFilter object>
validation_function = <function validate_outbound_headers>
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=True)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.21 seconds (0 invalid ones and 2 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

test/test_invalid_headers.py:509: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(107181424807040046205619004169421142049) to this test or run pytest with --hypothesis-seed=107181424807040046205619004169421142049 to reproduce this failure.
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags5-validate_headers] _

self = <test_invalid_headers.TestFilter object>
validation_function = <function validate_headers>
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=False)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.22 seconds (0 invalid ones and 1 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

test/test_invalid_headers.py:509: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(90679541300013936616247583822931628401) to this test or run pytest with --hypothesis-seed=90679541300013936616247583822931628401 to reproduce this failure.
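Both knobs that the FailedHealthCheck message points at, combined in one hedged sketch (the @given strategy is an assumption; the seed value is the one printed above):

    from hypothesis import HealthCheck, given, seed, settings, strategies as st

    @seed(107181424807040046205619004169421142049)  # replay this exact run
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(headers=st.lists(st.tuples(st.binary(), st.binary())))
    def test_range_of_acceptable_outputs(headers):
        pass

Equivalently, as the log itself notes, the seed can be supplied once on the command line via pytest --hypothesis-seed=107181424807040046205619004169421142049.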
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags5-validate_outbound_headers] _

data = ConjectureData(INTERESTING, 1084 bytes, frozen)

self = <test_invalid_headers.TestFilter object>
headers = [(b'H', b'd\x88\xa6p'), (b'\x01', b'N\x13\xd8l\xde{\xf9A\x13'), (b'\x92h', b'\xb3\x07A\xd11\xbb\x9e'), (b'D\xee?\x01', b'\xe4`V\xc6\x12'), (b'*\xd2', b'h\x94\x04\x1c'), (b'\xf2', b''), ...]
validation_function = <function validate_outbound_headers>
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=False)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4905445.427441351, result = None
finish = 4905445.711299624, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=283858)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 283.86ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=<test_invalid_headers.TestFilter object>,
E       validation_function=validate_outbound_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=False),
E       headers=[(b'H', b'd\x88\xa6p'),
E                (b'\x01', b'N\x13\xd8l\xde{\xf9A\x13'),
E                (b'\x92h', b'\xb3\x07A\xd11\xbb\x9e'),
E                (b'D\xee?\x01', b'\xe4`V\xc6\x12'),
E                (b'*\xd2', b'h\x94\x04\x1c'),
E                (b'\xf2', b''),
E                (b'\xcfL', b''),
E                (b'D\x81\x05\xbb\xe2G\xe42', b'U3\x02U\x14X\xc8*'),
E                (b'\xfcm\x916&\x16',
E                 b'\x87\xba`v\xb3\x87\xc4x\xe2\xbc\x01\xab\xe9@9\xac\xf8\t\x93\x90\x8e'),
E                (b'\xfb', b'.x\x03D\xc5\xca\x06WM\xa7\x85dW|#\x1eJw`Q\xc9'),
E                (b'\xf7\xfe\x1f\x9c\x05]\xb3\x87\xc3HD\xd2n\xe5\xb5R\x16\xb8S\xff\x92\xc5',
E                 b'\x84j\x94\xb8\xe6[@\xe1\x92\xda\xffd\n\x0ev\xe7\xfe'),
E                (b'0', b''),
E                (b'}', b'\xe8\xcb\xa6\x91G'),
E                (b'O', b'\xff'),
E                (b'\xf9\x00', b'&\x0b\x91'),
E                (b'G\xab\xfe3\xc0\x19\x1f', b'\xb9'),
E                (b'%G\xc3+\xdeG\xbf\xd5\xae\xbb',
E                 b'\tsy\xdb\xb0\xbe\x0b\xc67I\xec\xb7\x19\x1fz'),
E                (b'\xa2\xc7\xa2\x94\x16\x0f~v=', b'v\x916^~'),
E                (b'\xd6\x83\xce\xc1\x8a|(\\\xe0\xbbL', b'\xcf\xc0}'),
E                (b'/\xc3\xa5\x88\xc3x',
E                 b'\x0e\xd7r\xf9\xeeM\xd4\r\x00\xa3z\xda\xd5\x8f\x10'),
E                (b'\x01\xd4\x03', b'+J\n\xa0\x8cT\xea\xce\xf1')],
E   )
E   Unreliable test timings! On an initial run, this test took 283.86ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.60 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
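An alternative to loosening deadlines, offered here purely as an assumption (nothing in the log suggests it): pin part of the falsifying input with Hypothesis's @example decorator so the interesting case is replayed deterministically on every run. The single header pair is shortened from the 21-pair falsifying list above:

    from hypothesis import example, given, strategies as st

    @given(headers=st.lists(st.tuples(st.binary(), st.binary())))
    @example(headers=[(b'H', b'd\x88\xa6p')])  # first pair of the example above
    def test_range_of_acceptable_outputs(headers):
        pass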
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags6-validate_outbound_headers] _

data = ConjectureData(INTERESTING, 22 bytes, frozen)

self = <test_invalid_headers.TestFilter object>
headers = [(b']F\x9b\xb4', b'')]
validation_function = <function validate_outbound_headers>
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=True)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4905571.818799449, result = None
finish = 4905572.316397925, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=497598)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 497.60ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=<test_invalid_headers.TestFilter object>,
E       validation_function=validate_outbound_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=True),
E       headers=[(b']F\x9b\xb4', b'')],
E   )
E   Unreliable test timings! On an initial run, this test took 497.60ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.35 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
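Every failure in this run follows the same slow-builder pattern, so a suite-wide settings profile may fit better than per-test decorators. A conftest.py sketch (profile name and placement are assumptions, not taken from the h2 package):

    from hypothesis import HealthCheck, settings

    settings.register_profile(
        "buildroot",                                   # hypothetical name
        deadline=None,                                 # no wall-clock deadline
        suppress_health_check=[HealthCheck.too_slow],  # tolerate slow drawing
    )
    settings.load_profile("buildroot")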
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags7-validate_headers] _

data = ConjectureData(INTERESTING, 43 bytes, frozen)

self = <test_invalid_headers.TestFilter object>
headers = [(b'(\x125.', b'\xc0\x03\xe8\x00\xa07')]
validation_function = <function validate_headers>
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=False)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4905637.39358723, result = None
finish = 4905637.715669186, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=322082)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 322.08ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=<test_invalid_headers.TestFilter object>,
E       validation_function=validate_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=False),
E       headers=[(b'(\x125.', b'\xc0\x03\xe8\x00\xa07')],
E   )
E   Unreliable test timings! On an initial run, this test took 322.08ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.35 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags7-validate_outbound_headers] _ self = data = ConjectureData(INTERESTING, 145 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 145 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
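The fix the report itself suggests is to disable the per-example deadline for the affected test. A minimal sketch of what that looks like with Hypothesis' public settings API; the strategy and test body here are illustrative stand-ins, not the actual h2 test code:

    from hypothesis import given, settings, strategies as st

    header_pairs = st.lists(st.tuples(st.binary(min_size=1), st.binary()))

    @settings(deadline=None)  # opt out of the 200 ms per-example deadline
    @given(headers=header_pairs)
    def test_headers_are_validated(headers):
        # stand-in body; the real test calls h2's header validation here
        assert isinstance(headers, list)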
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags7-validate_outbound_headers] _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 145 bytes, frozen)

    [... _execute_once_for_engine / execute_once / run frames identical to the first failure above ...]

self = <test_invalid_headers.TestFilter object at 0x...>
headers = [(b'\xaa\x10\xc7B;\xad!\xf9\xf1`\xa9zii', b'\xf5)'),
           (b'X(\x9e\xdc\x8b\xc5\xa1\xe4\xb2', b'\x1aO~\xe8L\x81w\xfc\xfd')]
validation_function = <function validate_outbound_headers at 0x...>
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=False)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4905717.431227524, result = None
finish = 4905717.716139796, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=284912)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 284.91ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

data = ConjectureData(VALID, 145 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 284.91ms, which exceeds the deadline of 200.00ms'), "args = (...")

    [... final execute_once replay, identical to the first failure above ...]

>               raise Flaky(
                    f"Hypothesis {text_repr} produces unreliable results: "
                    "Falsified on the first call but did not on a subsequent one"
                ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=<test_invalid_headers.TestFilter object at 0x...>,
E       validation_function=validate_outbound_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=False),
E       headers=[(b'\xaa\x10\xc7B;\xad!\xf9\xf1`\xa9zii', b'\xf5)'),
E                (b'X(\x9e\xdc\x8b\xc5\xa1\xe4\xb2', b'\x1aO~\xe8L\x81w\xfc\xfd')],
E   )
E   Unreliable test timings! On an initial run, this test took 284.91ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.35 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
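Note the current_deadline local in these frames: the configured deadline is 200 ms, but non-final runs are compared against (current_deadline // 4) * 5, a 1.25x grace factor, which is where the 250000-microsecond value in the log comes from. A quick illustration of the arithmetic:

    from datetime import timedelta

    deadline = timedelta(milliseconds=200)   # settings.deadline, as reported
    slack = (deadline // 4) * 5              # grace applied on non-final runs
    print(slack)                             # 0:00:00.250000 -> 250000 microseconds

Only the final, shrunken replay is held to the strict 200 ms bound, which is why a 322 ms first run trips the deadline while a sub-millisecond replay does not.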
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags9-validate_headers] _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 454 bytes, frozen)

    [... _execute_once_for_engine / execute_once / run frames identical to the first failure above ...]

self = <test_invalid_headers.TestFilter object at 0x...>
headers = [(b'\xa8\xca\x19\xeb\xc3\x916\xaa', b'\x06\xdc\xc5ux'), (b'\x93\x01\x97BBo\x13\xe4:\x0eX', b'\xdf\x8c\xd1'), (b'\xb6%>...\xb3', b'\xfc\x8c\xc3\xa9\xccK\xda\x01\xcb;\x8ea\x9d\x8c\x19'), (b'\xc6z\xc6\t\xb5U\xb0\x93\xe2\xcd\xe2qJ', b'B'), ...]
validation_function = <function validate_headers at 0x...>
hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=True, is_push_promise=False)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4905921.245480388, result = None
finish = 4905921.617080821, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=371600)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 371.60ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

data = ConjectureData(VALID, 454 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 371.60ms, which exceeds the deadline of 200.00ms'), "args = (...")

    [... final execute_once replay, identical to the first failure above ...]

>               raise Flaky(
                    f"Hypothesis {text_repr} produces unreliable results: "
                    "Falsified on the first call but did not on a subsequent one"
                ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=<test_invalid_headers.TestFilter object at 0x...>,
E       validation_function=validate_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=True, is_push_promise=False),
E       headers=[(b'\xa8\xca\x19\xeb\xc3\x916\xaa', b'\x06\xdc\xc5ux'),
E                (b'\x93\x01\x97BBo\x13\xe4:\x0eX', b'\xdf\x8c\xd1'),
E                (b'\xb6%>Ol\xf8T1W\x87\xb7F\xd7', b''),
E                (b'\xd5\x86\x07', b''),
E                (b'\xe2\xed\xb8\x8e/\xf2\xb5s\x17\x08\xcf%\r\xb3',
E                 b'\xfc\x8c\xc3\xa9\xccK\xda\x01\xcb;\x8ea\x9d\x8c\x19'),
E                (b'\xc6z\xc6\t\xb5U\xb0\x93\xe2\xcd\xe2qJ', b'B'),
E                (b'\xeb', b'P8'),
E                (b'\xfc\xde0\xeb\x19\xbe\xceG\xf2"n\xb7', b'\xaa'),
E                (b',\xf5\xd3', b'\xe1\xab')],
E   )
E   Unreliable test timings! On an initial run, this test took 371.60ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.95 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
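Packagers generally do not want wall-clock deadlines inside a build root at all, since builder load makes timings like these meaningless. One common approach (the profile name and conftest.py placement here are illustrative, not taken from this package) is a Hypothesis settings profile loaded at collection time:

    import os
    from hypothesis import settings

    settings.register_profile("rpmbuild", deadline=None)   # no wall-clock deadline
    settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "rpmbuild"))

With such a profile active, every DeadlineExceeded/Flaky pair in this log would disappear without editing the individual tests.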
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags11-validate_headers] _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 73 bytes, frozen)

    [... _execute_once_for_engine / execute_once / run frames identical to the first failure above ...]

self = <test_invalid_headers.TestFilter object at 0x...>
headers = [(b'P\xaf', b'P\xaf'), (b'\xc2\x00\xd6', b'ag2\x98='), (b'\xc9', b'\xe2')]
validation_function = <function validate_headers at 0x...>
hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=False, is_push_promise=False)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4906223.797517124, result = None
finish = 4906224.134214364, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=336697)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 336.70ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

data = ConjectureData(VALID, 73 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 336.70ms, which exceeds the deadline of 200.00ms'), "args = (...")

    [... final execute_once replay, identical to the first failure above ...]

>               raise Flaky(
                    f"Hypothesis {text_repr} produces unreliable results: "
                    "Falsified on the first call but did not on a subsequent one"
                ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=<test_invalid_headers.TestFilter object at 0x...>,
E       validation_function=validate_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=False, is_push_promise=False),
E       headers=[(b'P\xaf', b'P\xaf'),
E                (b'\xc2\x00\xd6', b'ag2\x98='),
E                (b'\xc9', b'\xe2')],
E   )
E   Unreliable test timings! On an initial run, this test took 336.70ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.51 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
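The _execute_once_for_engine frames above also show that the engine only arms its sys.settrace-based "explain" pass when Phase.explain is enabled and no other tracer is active (the "settrace() contention" comment in the frames). As a hedged sketch, that phase can be switched off through the same settings API if tracing conflicts with a coverage tool; the test itself is a trivial stand-in:

    from hypothesis import Phase, given, settings, strategies as st

    without_explain = settings(phases=[p for p in Phase if p != Phase.explain])

    @without_explain
    @given(st.integers())
    def test_example(x):
        assert x == x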
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags12-validate_headers] _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 745 bytes, frozen)

    [... _execute_once_for_engine / execute_once / run frames identical to the first failure above ...]

self = <test_invalid_headers.TestFilter object at 0x...>
headers = [(b'\x08\xe2\x96\x17\x18', b'\x8b\x04O'), (b'4\x17\x11>.\xff\xbc\xff', b'\x06w'), (b'hL\xda', b'?\x8b\xc3\xac'), (b'\x...x8e', b''), (b':\x07\xd40v\xb2', b'Z\\\x8fjc\x9c\xbf\t\x94\x05D\xf3\x1b|'), (b'\x19\x13G\xed\x9e\x00', b'h\xcf('), ...]
validation_function = <function validate_headers at 0x...>
hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4906327.812057691, result = None
finish = 4906328.135525846, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=323468)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 323.47ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

data = ConjectureData(VALID, 745 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 323.47ms, which exceeds the deadline of 200.00ms'), "args = (...")

    [... final execute_once replay, identical to the first failure above ...]

>               raise Flaky(
                    f"Hypothesis {text_repr} produces unreliable results: "
                    "Falsified on the first call but did not on a subsequent one"
                ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=<test_invalid_headers.TestFilter object at 0x...>,
E       validation_function=validate_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True),
E       headers=[(b'\x08\xe2\x96\x17\x18', b'\x8b\x04O'),
E                (b'4\x17\x11>.\xff\xbc\xff', b'\x06w'),
E                (b'hL\xda', b'?\x8b\xc3\xac'),
E                (b'\xbe\x00D\x88\x8e', b''),
E                (b':\x07\xd40v\xb2', b'Z\\\x8fjc\x9c\xbf\t\x94\x05D\xf3\x1b|'),
E                (b'\x19\x13G\xed\x9e\x00', b'h\xcf('),
E                (b')k\\P9\xa0', b'\x90\xc1\x97\xd0aSA'),
E                (b'-\xe4\x1e\xc2\xdd\x0f', b'8\xff\xba'),
E                (b'\x7f\x1dOx-\x07\xaa', b'E\xbf'),
E                (b'G\x15', b'I\xf8Q\xfe\x878\x1f\x01\\<\xc7\xbfU\xfa\xb8\xeb'),
E                (b'\x0e',
E                 b'\x00\x94\xe5\x94j1\x00o\x1f\x06\xbe\xc3\x17[5\x8e\xefT7o\xd3\xba\xf5\xfb\x80\x00\x01\x13\xb1d\x9e'),
E                (b' L\xc4\xb9\xd2', b'!0\x00'),
E                (b'T\xbb', b'\x0f\xb3'),
E                (b'\x93\x8c1r\xdd\xc8\x01\xad\xe8',
E                 b'\x10\xff\xa1\xe1Zx\x8a\x1d\xbd\xdf\xffi\x9d\x11\xa6')],
E   )
E   Unreliable test timings! On an initial run, this test took 323.47ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.52 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
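The timing wrapper in these frames measures runtime as finish - start - internal_draw_time, so time spent drawing values from strategies is not charged against the deadline. A standalone sketch of the same bookkeeping, with a stand-in workload instead of a real test body:

    import time
    from datetime import timedelta

    start = time.perf_counter()
    sum(range(10**6))                  # stand-in for the wrapped test body
    finish = time.perf_counter()
    internal_draw_time = 0.0           # would be the sum of recorded draw times
    runtime = timedelta(seconds=finish - start - internal_draw_time)
    if runtime >= (timedelta(milliseconds=200) // 4) * 5:
        print("DeadlineExceeded would be raised:", runtime)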
                    tracer = Tracer()
                    try:
                        sys.settrace(tracer.trace)
                        result = self.execute_once(data)
                        if data.status == Status.VALID:
                            self.explain_traces[None].add(frozenset(tracer.branches))
                    finally:
                        sys.settrace(None)
                        trace = frozenset(tracer.branches)
                else:
>                   result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 114 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self, data, print_example=False, is_final=False,
        expected_failure=None, example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent an
        ordinary test failure, or a fatal error, or a control exception. If
        this method returns normally, the test might have passed, or it might
        have placed ``data`` in an unsuccessful state and then swallowed the
        corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find
        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run, build
            # args/kwargs from the given strategies, then call the
            # (possibly deadline-wrapped) test function.
            [body quoted in full in the run() frame below]
            return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 114 bytes, frozen)
function = <function execute_once.<locals>.run at 0xffffffb905a3e0>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 114 bytes, frozen)

    def run(data):
        # Set up dynamic context needed by a single test run.
        with local_settings(self.settings):
            with deterministic_PRNG():
                with BuildContext(data, is_final=is_final) as context:
                    [argument construction and verbose-example reporting
                     elided; identical to the dump earlier in this log]
>                   return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <test_invalid_headers.TestFilter object at 0x...>
headers = [(b'\xbb\x00wd\xd0T', b'\xb2'),
           (b"\x84N\x99()\xe3\xc0\xcb\xee\x1d'Y\xe7\x00\x81", b'\xcc\xa7\xa8S')]
validation_function = <function validate_outbound_headers at 0x...>
hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4906420.040702151, result = None
finish = 4906420.336208235, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=295506)
current_deadline = datetime.timedelta(microseconds=250000)

    [deadline proxy quoted above]
                if runtime >= current_deadline:
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 295.51ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

[Hypothesis replays the shrunk example once more, with
data = ConjectureData(VALID, 114 bytes, frozen), print_example = True,
is_final = True and expected_failure = (DeadlineExceeded('Test took 295.51ms,
which exceeds the deadline of 200.00ms'), <captured traceback>). The replay
finishes far below the deadline, so the expected failure does not reproduce:]

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report("Unreliable test timings! ...")  [full text printed below]
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=<test_invalid_headers.TestFilter object at 0x...>, headers=[(b'\xbb\x00wd\xd0T', b'\xb2'),
E             (b"\x84N\x99()\xe3\xc0\xcb\xee\x1d'Y\xe7\x00\x81", b'\xcc\xa7\xa8S')], validation_function=validate_outbound_headers, hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True)) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_range_of_acceptable_outputs(
E               self=<test_invalid_headers.TestFilter object at 0x...>,
E               validation_function=validate_outbound_headers,
E               hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True),
E               headers=[(b'\xbb\x00wd\xd0T', b'\xb2'),
E                        (b"\x84N\x99()\xe3\xc0\xcb\xee\x1d'Y\xe7\x00\x81", b'\xcc\xa7\xa8S')],
E           )
E           Unreliable test timings! On an initial run, this test took 295.51ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.46 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
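The 250 ms figure in the locals above follows directly from the deadline proxy
quoted in the traceback: Hypothesis measures runtime as wall-clock time minus
time spent drawing data, and on non-final runs enforces the configured
deadline plus a 25% grace margin. A minimal standard-library sketch of that
arithmetic, using the values from this failure (variable names mirror the
quoted internals):

    import datetime

    deadline = datetime.timedelta(milliseconds=200)     # settings.deadline
    is_final = False

    current_deadline = deadline
    if not is_final:
        # non-final runs get a 25% grace margin: 200 ms -> 250 ms
        current_deadline = (current_deadline // 4) * 5

    runtime = datetime.timedelta(microseconds=295506)   # measured above
    print(current_deadline)             # 0:00:00.250000
    print(runtime >= current_deadline)  # True -> DeadlineExceeded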
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 236 bytes, frozen) function = .run at 0xffffffb8cfc4a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 236 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'\xd8\x15Q\xcf\x1bn\x88\xe2\x02\x16\tg\xc6', b''), (b's\xb3\xf5B\x95\x80r\x11\x077y', b'\xc8r\x12\xc9\xef\xc2\x0b\xa0y\x0b&x\xac\xdf\xe3\xb0\x9e\xddR\xdb\xae'), (b'\xd8\x15Q\xcf\x1bn\x88\xe2\x02\x16\tg\xc6', b'')] validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'\xd8\x15Q\xcf\x1bn\x88\xe2\x02\x16\tg\xc6', b'...fffb9f33ce0>, HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False)) kwargs = {}, initial_draws = 1, start = 4906469.81474897, result = None finish = 4906470.135373028, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=320624) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 320.62ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 236 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 320.62ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise 
DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\xd8\x15Q\xcf\x1bn\x88\xe2\x02\x16\tg\xc6', b''), E (b's\xb3\xf5B\x95\x80r\x11\x077y', E b'\xc8r\x12\xc9\xef\xc2\x0b\xa0y\x0b&x\xac\xdf\xe3\xb0\x9e\xddR\xdb\xae'), E (b'\xd8\x15Q\xcf\x1bn\x88\xe2\x02\x16\tg\xc6', b'')], validation_function=validate_headers, hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False), E headers=[(b'\xd8\x15Q\xcf\x1bn\x88\xe2\x02\x16\tg\xc6', b''), E (b's\xb3\xf5B\x95\x80r\x11\x077y', E b'\xc8r\x12\xc9\xef\xc2\x0b\xa0y\x0b&x\xac\xdf\xe3\xb0\x9e\xddR\xdb\xae'), E (b'\xd8\x15Q\xcf\x1bn\x88\xe2\x02\x16\tg\xc6', b'')], E ) E Unreliable test timings! 
On an initial run, this test took 320.62ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.47 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags13-validate_outbound_headers] _ self = data = ConjectureData(INTERESTING, 646 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 646 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags13-validate_outbound_headers] _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 646 bytes, frozen)

[same frame sequence as above; duplicated Hypothesis source elided]

self = <test_invalid_headers.TestFilter object at 0x...>
validation_function = <function validate_outbound_headers at 0x...>
hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4906562.058886313, result = None
finish = 4906562.648542083, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=589656)
current_deadline = datetime.timedelta(microseconds=250000)

>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 589.66ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

[final replay with data = ConjectureData(VALID, 646 bytes, frozen) did not
reproduce the DeadlineExceeded, so Hypothesis raises Flaky:]

E           hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(... see falsifying example below ...) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_range_of_acceptable_outputs(
E               self=<test_invalid_headers.TestFilter object at 0x...>,
E               validation_function=validate_outbound_headers,
E               hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False),
E               headers=[(b'K\x01', b'\xab'),
E                        (b'\x03\xd6[\xc4\xc1\xa9\xaa\xe2\x94\xfb',
E                         b'2\x15\xc4_\x83\x89\xfe\xee \xb0\x9ek\x07\x94'),
E                        (b'\xfe', b'0;\xac\x96\x01\x97'),
E                        (b'\x05\x1f\x91\xe1Wn\x04\x04\xd3!C\x12.', b'S&(\x01'),
E                        (b'\xff_\xeb\xd3', b'\x1e'),
E                        (b'\x04\x15\xff\xad\xbd\xa4\xbeX\x05:\x1e\x00U\x0c\x04\xa7\x89%<\x8cj\t',
E                         b''),
E                        (b",'(RZ\x15", b'\xf6\x89\x1b\xed'),
E                        (b'\x00\x9c\x8e\xfeW\xf4\xc2\x84', b''),
E                        (b'`\xd3\x81-l', b'\xfb\xce\xb9\xbf@\xf3'),
E                        (b'\xe21\x1e', b'+\xae\xe4&\xcbz'),
E                        (b'\xc6,\xa9f\xeb\xf2O\x9b\x87F&o', b'\xb9'),
E                        (b'\xb6{\xa2\xa7\x84>"', b'\xca^\x06\xed2\xb5\xe3\x07\xec\xff'),
E                        (b'\x83\xb8"WA', b'e\xaf\xab')],
E           )
E           Unreliable test timings! On an initial run, this test took 589.66ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.47 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags15-validate_outbound_headers] _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 2 bytes, frozen)

[same frame sequence as above; duplicated Hypothesis source elided]

self = <test_invalid_headers.TestFilter object at 0x...>
headers = []
validation_function = <function validate_outbound_headers at 0x...>
hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=False)

test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1, start = 4906818.32428391, result = None
finish = 4906818.743938162, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=419654)
current_deadline = datetime.timedelta(microseconds=250000)

>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 419.65ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

[final replay with data = ConjectureData(VALID, 2 bytes, frozen) did not
reproduce the DeadlineExceeded, so Hypothesis raises Flaky:]

E           hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=<test_invalid_headers.TestFilter object at 0x...>, headers=[], validation_function=validate_outbound_headers, hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=False)) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_range_of_acceptable_outputs(
E               self=<test_invalid_headers.TestFilter object at 0x...>,
E               validation_function=validate_outbound_headers,
E               hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=False),
E               headers=[],
E           )
E           Unreliable test timings! On an initial run, this test took 419.65ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.27 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) function = .run at 0xffffffb8e42f20> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , val = 24378 @given(integers()) > def test_cannot_set_invalid_values_for_enable_push(self, val): test/test_settings.py:271: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, 24378) kwargs = {}, initial_draws = 1, start = 4906882.809558511, result = None finish = 4906883.15682994, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=347271) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 347.27ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = @given(integers()) > def test_cannot_set_invalid_values_for_enable_push(self, val): test/test_settings.py:271: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 4 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 347.27ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_cannot_set_invalid_values_for_enable_push(self=, val=24378) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_cannot_set_invalid_values_for_enable_push( E self=, E val=24378, E ) E Unreliable test timings! On an initial run, this test took 347.27ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.97 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________ TestSettings.test_cannot_set_invalid_values_for_max_frame_size ________ self = data = ConjectureData(INTERESTING, 3 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. 
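The property behind this failure is simple: RFC 7540 allows
SETTINGS_ENABLE_PUSH to be only 0 or 1, and h2's Settings object rejects
everything else. A sketch of what the test at test/test_settings.py:271
exercises (the exact test body is an assumption; val=24378 is the falsifying
example from the log, and the exception class is h2's documented
InvalidSettingsValueError):

    import pytest
    from h2.settings import Settings
    from h2.exceptions import InvalidSettingsValueError

    s = Settings(client=True)
    s.enable_push = 1                 # 0 and 1 are the only legal values
    with pytest.raises(InvalidSettingsValueError):
        s.enable_push = 24378         # falsifying example above: rejected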
tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) function = .run at 0xffffffb8cff740> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , val = -97 @given(integers()) > def test_cannot_set_invalid_values_for_max_frame_size(self, val): test/test_settings.py:324: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, -97) kwargs = {}, initial_draws = 1, start = 4906907.072879288, result = None finish = 4906907.371500669, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=298621) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 298.62ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = @given(integers()) > def test_cannot_set_invalid_values_for_max_frame_size(self, val): test/test_settings.py:324: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 3 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 298.62ms, which exceeds the deadline of 200.00ms'), 'args = (= 
            current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_cannot_set_invalid_values_for_max_frame_size(self=, val=-97) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_cannot_set_invalid_values_for_max_frame_size(
E               self=,
E               val=-97,
E           )
E           Unreliable test timings! On an initial run, this test took 298.62ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.95 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____ TestSettings.test_cannot_set_invalid_values_for_max_header_list_size _____

self =
data = ConjectureData(INTERESTING, 3 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
""" try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 3 bytes, frozen)
function = .run at 0xffffffb80df060>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 3 bytes, frozen)

    def run(data):
        # Set up dynamic context needed by a single test run.
        with local_settings(self.settings):
            with deterministic_PRNG():
                with BuildContext(data, is_final=is_final) as context:
                    if self.stuff.selfy is not None:
                        data.hypothesis_runner = self.stuff.selfy
                    # Generate all arguments to the test function.
                    args = self.stuff.args
                    kwargs = dict(self.stuff.kwargs)
                    if example_kwargs is None:
                        a, kw, argslices = context.prep_args_kwargs_from_strategies(
                            (), self.stuff.given_kwargs
                        )
                        assert not a, "strategies all moved to kwargs by now"
                    else:
                        kw = example_kwargs
                        argslices = {}
                    kwargs.update(kw)
                    if expected_failure is not None:
                        nonlocal text_repr
                        text_repr = repr_call(test, args, kwargs)

                    if print_example or current_verbosity() >= Verbosity.verbose:
                        printer = RepresentationPrinter(context=context)
                        if print_example:
                            printer.text("Falsifying example:")
                        else:
                            printer.text("Trying example:")

                        if self.print_given_args:
                            printer.text(" ")
                            printer.repr_call(
                                test.__name__,
                                args,
                                kwargs,
                                force_split=True,
                                arg_slices=argslices,
                                leading_comment=(
                                    "# " + context.data.slice_comments[(0, 0)]
                                    if (0, 0) in context.data.slice_comments
                                    else None
                                ),
                            )
                        report(printer.getvalue())
>                   return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , val = -80

    @given(integers())
>   def test_cannot_set_invalid_values_for_max_header_list_size(self, val):

test/test_settings.py:350:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (, -80)
kwargs = {}, initial_draws = 1, start = 4906923.858738053, result = None
finish = 4906924.153352538, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=294614)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 294.61ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =

    @given(integers())
>   def test_cannot_set_invalid_values_for_max_header_list_size(self, val):

test/test_settings.py:350:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 3 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 294.61ms, which exceeds the deadline of 200.00ms'), 'args = (=
            current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_cannot_set_invalid_values_for_max_header_list_size(self=, val=-80) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_cannot_set_invalid_values_for_max_header_list_size(
E               self=,
E               val=-80,
E           )
E           Unreliable test timings! On an initial run, this test took 294.61ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.01 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___ TestSettings.test_cannot_set_invalid_values_for_enable_connect_protocol ____

self =
data = ConjectureData(INTERESTING, 3 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
""" try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 3 bytes, frozen)
function = .run at 0xffffffb8e913a0>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 3 bytes, frozen)

    def run(data):
        # Set up dynamic context needed by a single test run.
        with local_settings(self.settings):
            with deterministic_PRNG():
                with BuildContext(data, is_final=is_final) as context:
                    if self.stuff.selfy is not None:
                        data.hypothesis_runner = self.stuff.selfy
                    # Generate all arguments to the test function.
                    args = self.stuff.args
                    kwargs = dict(self.stuff.kwargs)
                    if example_kwargs is None:
                        a, kw, argslices = context.prep_args_kwargs_from_strategies(
                            (), self.stuff.given_kwargs
                        )
                        assert not a, "strategies all moved to kwargs by now"
                    else:
                        kw = example_kwargs
                        argslices = {}
                    kwargs.update(kw)
                    if expected_failure is not None:
                        nonlocal text_repr
                        text_repr = repr_call(test, args, kwargs)

                    if print_example or current_verbosity() >= Verbosity.verbose:
                        printer = RepresentationPrinter(context=context)
                        if print_example:
                            printer.text("Falsifying example:")
                        else:
                            printer.text("Trying example:")

                        if self.print_given_args:
                            printer.text(" ")
                            printer.repr_call(
                                test.__name__,
                                args,
                                kwargs,
                                force_split=True,
                                arg_slices=argslices,
                                leading_comment=(
                                    "# " + context.data.slice_comments[(0, 0)]
                                    if (0, 0) in context.data.slice_comments
                                    else None
                                ),
                            )
                        report(printer.getvalue())
>                   return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = , val = 66

    @given(integers())
>   def test_cannot_set_invalid_values_for_enable_connect_protocol(self, val):

test/test_settings.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (, 66)
kwargs = {}, initial_draws = 1, start = 4906936.659389876, result = None
finish = 4906936.95521586, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=295826)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 295.83ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =

    @given(integers())
>   def test_cannot_set_invalid_values_for_enable_connect_protocol(self, val):

test/test_settings.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 3 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 295.83ms, which exceeds the deadline of 200.00ms'), 'args = (=
            current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_cannot_set_invalid_values_for_enable_connect_protocol(self=, val=66) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_cannot_set_invalid_values_for_enable_connect_protocol(
E               self=,
E               val=66,
E           )
E           Unreliable test timings! On an initial run, this test took 295.83ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.95 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
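[Editor's note] All four TestSettings failures above are the same pattern: a slow first run trips Hypothesis's per-example deadline, the final replay is fast, and Hypothesis reports Flaky. The remedy Hypothesis itself suggests is deadline=None. A minimal sketch of that fix, in which the property and the validate_max_frame_size helper are illustrative stand-ins (not h2's actual test code), while the [2**14, 2**24 - 1] range is the real SETTINGS_MAX_FRAME_SIZE bound from RFC 7540 section 6.5.2:

    # Sketch: disable the per-example deadline for a timing-sensitive
    # property-based test. Only @settings(deadline=None) is the point here;
    # validate_max_frame_size is a hypothetical helper for illustration.
    from hypothesis import given, settings, strategies as st

    def validate_max_frame_size(val: int) -> bool:
        # Hypothetical validator: HTTP/2 permits SETTINGS_MAX_FRAME_SIZE
        # only in [2**14, 2**24 - 1] (RFC 7540, section 6.5.2).
        return 2**14 <= val <= 2**24 - 1

    @settings(deadline=None)  # no deadline: a slow first run can no longer flake
    @given(st.integers())
    def test_out_of_range_values_are_rejected(val):
        if val < 2**14 or val > 2**24 - 1:
            assert not validate_max_frame_size(val)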
=========================== short test summary info ============================
FAILED test/test_basic_logic.py::TestBasicClient::test_changing_max_frame_size
FAILED test/test_events.py::TestRemoteSettingsChanged::test_building_settings_from_scratch
FAILED test/test_events.py::TestRemoteSettingsChanged::test_correctly_reports_changed_settings
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_must_acknowledge_for_stream
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_cannot_acknowledge_less_than_zero
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_acknowledging_1024_bytes_when_empty_increments
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_connection_only_empty
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_mixing_update_forms
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags0-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags1-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags3-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags4-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags4-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags5-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags5-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags6-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags7-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags7-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags9-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags11-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags12-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags12-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags13-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags13-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags15-validate_outbound_headers]
FAILED test/test_settings.py::TestSettings::test_cannot_set_invalid_values_for_enable_push
FAILED test/test_settings.py::TestSettings::test_cannot_set_invalid_values_for_max_frame_size
FAILED test/test_settings.py::TestSettings::test_cannot_set_invalid_values_for_max_header_list_size
FAILED test/test_settings.py::TestSettings::test_cannot_set_invalid_values_for_enable_connect_protocol
================= 29 failed, 1404 passed in 3295.89s (0:54:55) =================
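[Editor's note] The tracebacks above also show why only the first run failed: on non-final runs Hypothesis widens the configured deadline by 25% via (current_deadline // 4) * 5. A toy reproduction of that arithmetic, using the numbers from the first failure (not Hypothesis's own code, just the same timedelta computation):

    import datetime

    deadline = datetime.timedelta(milliseconds=200)       # configured deadline
    runtime = datetime.timedelta(microseconds=298621)     # first-run runtime

    # Non-final runs get a 25% grace period: (d // 4) * 5 == 1.25 * d.
    current_deadline = (deadline // 4) * 5
    assert current_deadline == datetime.timedelta(milliseconds=250)

    # 298.62 ms >= 250 ms, so the first run raises DeadlineExceeded...
    assert runtime >= current_deadline

    # ...but the final replay took only ~0.95 ms, under even the strict
    # 200 ms deadline, so Hypothesis reports the test as Flaky instead.
    replay = datetime.timedelta(microseconds=950)
    assert replay < deadline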
py312: exit 1 (3350.93 seconds) /builddir/build/BUILD/h2-4.1.0> pytest pid=552823
py312: FAIL code 1 (3355.63 seconds)
  evaluation failed :( (3370.85 seconds)
RPM build errors:
error: Bad exit status from /var/tmp/rpm-tmp.wd3JwA (%check)
    Bad exit status from /var/tmp/rpm-tmp.wd3JwA (%check)
Child return code was: 1
EXCEPTION: [Error('Command failed: \n # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec\n', 1)]
Traceback (most recent call last):
  File "/usr/lib/python3.11/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
             ^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mockbuild/util.py", line 597, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed:
 # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec
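[Editor's note] Since the %check failure aborts the whole mock build, a suite-wide mitigation may be preferable to patching tests one by one. A sketch of one such approach, which is not what this build actually does: drop a settings profile into the test suite's conftest.py using Hypothesis's standard profile API (the profile name "packaging" is arbitrary), so slow builders cannot trip the 200 ms default deadline anywhere in the suite.

    # conftest.py (sketch): disable Hypothesis deadlines for the whole run.
    # settings.register_profile / settings.load_profile are standard
    # Hypothesis APIs; importing this module at collection time applies it.
    from hypothesis import settings

    settings.register_profile("packaging", deadline=None)
    settings.load_profile("packaging")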