Mock Version: 5.0 Mock Version: 5.0 Mock Version: 5.0 ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2418482-60580/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1708300800 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.src.rpm Child return code was: 0 ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2418482-60580/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1708300800 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.fE27ZS + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf h2-4.1.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/h2-4.1.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd h2-4.1.0 + rm -rf /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/mkdir -p /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
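For readers following along: each ENTER ['do_with_status'] block in this log is mock spawning rpmbuild inside the f40 build chroot. The -bs invocation above wrote the source RPM; the -br invocations that follow run only %prep and %generate_buildrequires so that missing build dependencies can be discovered. A rough local equivalent, assuming mock is installed and using a placeholder for whichever fedora-40 config matches your architecture, would be:

    mock -r <fedora-40-config> --buildsrpm --spec python-h2.spec --sources SOURCES/
    mock -r <fedora-40-config> --rebuild python-h2-4.1.0-1.fc40.src.rpm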
+ /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fedora-tox-adjustments.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fix-repr-checks-for-Python-3.11.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.9M8iFg + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir 
/builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement not satisfied: wheel Exiting dependency generation pass: build backend + cat /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv '*.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2418482-60580/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1708300800 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.8galsX + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf h2-4.1.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/h2-4.1.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd h2-4.1.0 + rm -rf /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/mkdir -p /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
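The "Child return code was: 11" lines are expected here: rpmbuild -br exits with status 11 when %generate_buildrequires emitted requirements that are not yet installed in the chroot. Mock then installs them (see root.log) and re-runs the pass, which is why %prep and the flag exports repeat several times in this log, with each pass getting one step further. A minimal sketch of that install-and-retry loop, assuming dnf builddep is available (mock's real implementation differs), looks like:

    rc=11
    while [ "$rc" -eq 11 ]; do
        rpmbuild -br --noclean --nodeps /builddir/build/SPECS/python-h2.spec
        rc=$?
        if [ "$rc" -eq 11 ]; then
            dnf builddep -y /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm
        fi
    done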
+ /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fedora-tox-adjustments.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fix-repr-checks-for-Python-3.11.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.8dQLh8 + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir 
/builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info creating src/h2.egg-info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt writing manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) Handling tox-current-env >= 0.0.6 from tox itself Requirement not satisfied: tox-current-env >= 0.0.6 Exiting dependency generation pass: tox itself + cat /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv '*.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2418482-60580/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1708300800 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.rcYjjI + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf h2-4.1.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/h2-4.1.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd h2-4.1.0 + rm -rf /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/mkdir -p /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
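The generate_buildrequires pass above satisfied setuptools and wheel, ran egg_info, and then stopped at tox-current-env: the -t switch at the end of the pyproject_buildrequires.py command line tells the macro to also derive test dependencies from the project's tox configuration, which needs the tox-current-env plugin. Once that plugin is installed, the next pass can ask tox for the py312 dependencies, roughly what the following prints when run from the source tree (an illustration; it assumes tox and tox-current-env are installed):

    tox -e py312 --print-deps-only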
+ /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fedora-tox-adjustments.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fix-repr-checks-for-Python-3.11.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.gaRBYJ + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir 
/builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info creating src/h2.egg-info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt writing manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) Handling tox-current-env >= 0.0.6 from tox itself Requirement satisfied: tox-current-env >= 0.0.6 (installed: tox-current-env 0.0.11) py312: OK (2.59 seconds) congratulations :) (16.02 seconds) Handling tox from tox --print-deps-only: py312 Requirement satisfied: tox (installed: tox 4.12.1) Handling pytest>=6.0.1 from tox --print-deps-only: py312 Requirement not satisfied: pytest>=6.0.1 Handling hypothesis>=5.5,<7 from tox --print-deps-only: py312 Requirement not satisfied: hypothesis>=5.5,<7 running dist_info creating h2.egg-info writing h2.egg-info/PKG-INFO writing dependency_links to h2.egg-info/dependency_links.txt writing requirements to h2.egg-info/requires.txt writing top-level names to h2.egg-info/top_level.txt writing manifest file 'h2.egg-info/SOURCES.txt' reading manifest file 'h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'h2.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/h2-4.1.0/h2-4.1.0.dist-info' Handling hyperframe <7,>=6.0 from hook generated metadata: Requires-Dist (h2) Requirement not satisfied: hyperframe <7,>=6.0 Handling hpack <5,>=4.0 from hook generated metadata: Requires-Dist (h2) Requirement not satisfied: hpack <5,>=4.0 + cat 
/builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv h2-4.1.0.dist-info/ removed 'h2-4.1.0.dist-info/LICENSE' removed 'h2-4.1.0.dist-info/top_level.txt' removed 'h2-4.1.0.dist-info/METADATA' removed directory 'h2-4.1.0.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2418482-60580/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1708300800 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.hQnW07 + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf h2-4.1.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/h2-4.1.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd h2-4.1.0 + rm -rf /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/mkdir -p /builddir/build/BUILD/h2-4.1.0-SPECPARTS + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . 
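Besides the tox-derived test dependencies (pytest, hypothesis), the pass above also picked up h2's own runtime requirements, hyperframe and hpack, from the metadata produced by the backend's prepare-metadata hook; they become BuildRequires so the test suite can actually import the package. Those entries come from the transient h2-4.1.0.dist-info/ directory the log shows being created and then removed; if you regenerate that metadata yourself, something along these lines shows the same entries (an illustration, run from the unpacked source tree while the dist-info exists):

    grep '^Requires-Dist' h2-4.1.0.dist-info/METADATA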
+ /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fedora-tox-adjustments.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + /usr/lib/rpm/rpmuncompress /builddir/build/SOURCES/0001-Fix-repr-checks-for-Python-3.11.patch + /usr/bin/patch -p1 -s --fuzz=0 --no-backup-if-mismatch -f + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.yexYbd + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir 
/builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info creating src/h2.egg-info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt writing manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) Handling tox-current-env >= 0.0.6 from tox itself Requirement satisfied: tox-current-env >= 0.0.6 (installed: tox-current-env 0.0.11) py312: OK (2.17 seconds) congratulations :) (16.42 seconds) Handling tox from tox --print-deps-only: py312 Requirement satisfied: tox (installed: tox 4.12.1) Handling pytest>=6.0.1 from tox --print-deps-only: py312 Requirement satisfied: pytest>=6.0.1 (installed: pytest 7.3.2) Handling hypothesis>=5.5,<7 from tox --print-deps-only: py312 Requirement satisfied: hypothesis>=5.5,<7 (installed: hypothesis 6.82.0) running dist_info creating h2.egg-info writing h2.egg-info/PKG-INFO writing dependency_links to h2.egg-info/dependency_links.txt writing requirements to h2.egg-info/requires.txt writing top-level names to h2.egg-info/top_level.txt writing manifest file 'h2.egg-info/SOURCES.txt' reading manifest file 'h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'h2.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/h2-4.1.0/h2-4.1.0.dist-info' Handling hyperframe <7,>=6.0 from hook generated metadata: Requires-Dist (h2) Requirement satisfied: hyperframe <7,>=6.0 (installed: hyperframe 6.0.1) Handling hpack <5,>=4.0 from hook generated metadata: Requires-Dist 
(h2) Requirement satisfied: hpack <5,>=4.0 (installed: hpack 4.0.0) + cat /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv h2-4.1.0.dist-info/ removed 'h2-4.1.0.dist-info/LICENSE' removed 'h2-4.1.0.dist-info/top_level.txt' removed 'h2-4.1.0.dist-info/METADATA' removed directory 'h2-4.1.0.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-h2-4.1.0-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'], chrootPath='/var/lib/mock/f40-build-2418482-60580/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1708300800 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.YI1qKZ + umask 022 + cd /builddir/build/BUILD + cd h2-4.1.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export 
LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + '[' -f setup.py ']' + echo 'python3dist(setuptools) >= 40.8' + echo 'python3dist(wheel)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires -t Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) Handling tox-current-env >= 0.0.6 from tox itself Requirement satisfied: tox-current-env >= 0.0.6 (installed: tox-current-env 0.0.11) py312: OK (2.44 seconds) congratulations :) (16.60 seconds) Handling tox from tox --print-deps-only: py312 Requirement satisfied: tox (installed: tox 4.12.1) Handling pytest>=6.0.1 from tox --print-deps-only: py312 Requirement satisfied: pytest>=6.0.1 (installed: pytest 7.3.2) Handling hypothesis>=5.5,<7 from tox --print-deps-only: py312 Requirement satisfied: hypothesis>=5.5,<7 (installed: hypothesis 6.82.0) running dist_info writing h2.egg-info/PKG-INFO writing dependency_links to h2.egg-info/dependency_links.txt writing requirements to h2.egg-info/requires.txt writing top-level names to h2.egg-info/top_level.txt reading 
manifest file 'h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'h2.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/h2-4.1.0/h2-4.1.0.dist-info' Handling hyperframe <7,>=6.0 from hook generated metadata: Requires-Dist (h2) Requirement satisfied: hyperframe <7,>=6.0 (installed: hyperframe 6.0.1) Handling hpack <5,>=4.0 from hook generated metadata: Requires-Dist (h2) Requirement satisfied: hpack <5,>=4.0 (installed: hpack 4.0.0) + cat /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-buildrequires + rm -rfv h2-4.1.0.dist-info/ removed 'h2-4.1.0.dist-info/LICENSE' removed 'h2-4.1.0.dist-info/top_level.txt' removed 'h2-4.1.0.dist-info/METADATA' removed directory 'h2-4.1.0.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.wKOd1q + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd h2-4.1.0 + mkdir -p 
/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_wheel.py /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir Processing /builddir/build/BUILD/h2-4.1.0 Preparing metadata (pyproject.toml): started Running command Preparing metadata (pyproject.toml) running dist_info creating /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-nj8vh8ig/h2.egg-info writing /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-nj8vh8ig/h2.egg-info/PKG-INFO writing dependency_links to /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-nj8vh8ig/h2.egg-info/dependency_links.txt writing requirements to /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-nj8vh8ig/h2.egg-info/requires.txt writing top-level names to /builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-nj8vh8ig/h2.egg-info/top_level.txt writing manifest file '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-nj8vh8ig/h2.egg-info/SOURCES.txt' reading manifest file '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-nj8vh8ig/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-nj8vh8ig/h2.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-modern-metadata-nj8vh8ig/h2-4.1.0.dist-info' Preparing metadata (pyproject.toml): finished with status 'done' Building wheels for collected packages: h2 Building wheel for h2 (pyproject.toml): started Running command Building wheel for h2 (pyproject.toml) running bdist_wheel running build running build_py creating build creating build/lib creating build/lib/h2 copying src/h2/events.py -> build/lib/h2 copying src/h2/utilities.py -> build/lib/h2 copying src/h2/connection.py -> build/lib/h2 copying src/h2/errors.py -> build/lib/h2 copying src/h2/windows.py -> build/lib/h2 copying src/h2/frame_buffer.py -> build/lib/h2 copying src/h2/exceptions.py -> build/lib/h2 copying src/h2/settings.py -> build/lib/h2 copying src/h2/__init__.py -> build/lib/h2 copying src/h2/stream.py -> build/lib/h2 copying src/h2/config.py 
-> build/lib/h2 installing to build/bdist.linux-riscv64/wheel running install running install_lib creating build/bdist.linux-riscv64 creating build/bdist.linux-riscv64/wheel creating build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/events.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/utilities.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/connection.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/errors.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/windows.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/frame_buffer.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/exceptions.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/settings.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/__init__.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/stream.py -> build/bdist.linux-riscv64/wheel/h2 copying build/lib/h2/config.py -> build/bdist.linux-riscv64/wheel/h2 running install_egg_info running egg_info writing src/h2.egg-info/PKG-INFO writing dependency_links to src/h2.egg-info/dependency_links.txt writing requirements to src/h2.egg-info/requires.txt writing top-level names to src/h2.egg-info/top_level.txt reading manifest file 'src/h2.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' no previously-included directories found matching 'docs/build' warning: no previously-included files matching '*.pyc' found anywhere in distribution warning: no previously-included files matching '*.pyo' found anywhere in distribution warning: no previously-included files matching '*.swo' found anywhere in distribution warning: no previously-included files matching '*.swp' found anywhere in distribution warning: no previously-included files matching '*.map' found anywhere in distribution warning: no previously-included files matching '*.yml' found anywhere in distribution warning: no previously-included files matching '*.DS_Store' found anywhere in distribution adding license file 'LICENSE' writing manifest file 'src/h2.egg-info/SOURCES.txt' Copying src/h2.egg-info to build/bdist.linux-riscv64/wheel/h2-4.1.0-py3.12.egg-info running install_scripts creating build/bdist.linux-riscv64/wheel/h2-4.1.0.dist-info/WHEEL creating '/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir/pip-wheel-xaxq9igl/.tmp-ki1mggo2/h2-4.1.0-py3-none-any.whl' and adding 'build/bdist.linux-riscv64/wheel' to it adding 'h2/__init__.py' adding 'h2/config.py' adding 'h2/connection.py' adding 'h2/errors.py' adding 'h2/events.py' adding 'h2/exceptions.py' adding 'h2/frame_buffer.py' adding 'h2/settings.py' adding 'h2/stream.py' adding 'h2/utilities.py' adding 'h2/windows.py' adding 'h2-4.1.0.dist-info/LICENSE' adding 'h2-4.1.0.dist-info/METADATA' adding 'h2-4.1.0.dist-info/WHEEL' adding 'h2-4.1.0.dist-info/top_level.txt' adding 'h2-4.1.0.dist-info/RECORD' removing build/bdist.linux-riscv64/wheel Building wheel for h2 (pyproject.toml): finished with status 'done' Created wheel for h2: filename=h2-4.1.0-py3-none-any.whl size=57120 sha256=1f654d3a7127a5cf2db033c161e71dd074f85cfb4fb6deb53a652b7b725616c7 Stored in directory: /builddir/.cache/pip/wheels/42/0e/c2/d8f850eabf7f97289d30ba1b1585175875c2b090a973af407d Successfully built h2 + PYTHONPATH=/builddir/build/BUILD/h2-4.1.0/build/lib.linux-riscv64-cpython-312 + sphinx-build docs/source html Running Sphinx v7.2.6 making output directory... done loading intersphinx inventory from https://docs.python.org/objects.inv... 
loading intersphinx inventory from https://hpack.readthedocs.io/en/stable/objects.inv... loading intersphinx inventory from https://pyopenssl.readthedocs.org/en/latest/objects.inv... WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://pyopenssl.readthedocs.org/en/latest/objects.inv' not fetchable due to : HTTPSConnectionPool(host='pyopenssl.readthedocs.org', port=443): Max retries exceeded with url: /en/latest/objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://docs.python.org/objects.inv' not fetchable due to : HTTPSConnectionPool(host='docs.python.org', port=443): Max retries exceeded with url: /objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://hpack.readthedocs.io/en/stable/objects.inv' not fetchable due to : HTTPSConnectionPool(host='hpack.readthedocs.io', port=443): Max retries exceeded with url: /en/stable/objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) building [mo]: targets for 0 po files that are out of date writing output... building [html]: targets for 21 source files that are out of date updating environment: [new config] 21 added, 0 changed, 0 removed reading sources... [ 5%] advanced-usage reading sources... [ 10%] api reading sources... [ 14%] asyncio-example reading sources... [ 19%] basic-usage reading sources... [ 24%] curio-example reading sources... [ 29%] eventlet-example reading sources... [ 33%] examples reading sources... [ 38%] gevent-example reading sources... [ 43%] index reading sources... [ 48%] installation reading sources... [ 52%] low-level reading sources... [ 57%] negotiating-http2 reading sources... [ 62%] plain-sockets-example reading sources... [ 67%] release-notes reading sources... [ 71%] release-process reading sources... [ 76%] testimonials reading sources... [ 81%] tornado-example reading sources... [ 86%] twisted-example reading sources... [ 90%] twisted-head-example reading sources... [ 95%] twisted-post-example reading sources... 
[100%] wsgi-example WARNING: autodoc: failed to import class 'connection.H2Connection' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'config.H2Configuration' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.RequestReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.ResponseReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.TrailersReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.InformationalResponseReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.DataReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.WindowUpdated' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.RemoteSettingsChanged' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.PingReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.PingAckReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.StreamEnded' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.StreamReset' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.PushedStreamReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.SettingsAcknowledged' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.PriorityUpdated' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.ConnectionTerminated' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'events.AlternativeServiceAvailable' from module 'h2'; the following exception was raised: No module named 'h2' looking for now-outdated files... none found pickling environment... 
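The autodoc warnings above (and the remaining ones below) occur because Sphinx cannot import h2 inside the chroot: the docs build exported PYTHONPATH=/builddir/build/BUILD/h2-4.1.0/build/lib.linux-riscv64-cpython-312, but for this pure-Python wheel setuptools copied the sources to build/lib, so the directory on PYTHONPATH does not exist and every "No module named 'h2'" follows from that. The intersphinx warnings earlier are a separate, harmless effect of the network-isolated chroot (unshare_net=True). One possible fix, untested here, is to point the docs build at a directory that does exist, for example:

    PYTHONPATH=/builddir/build/BUILD/h2-4.1.0/build/lib sphinx-build docs/source html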
WARNING: autodoc: failed to import class 'events.UnknownFrameReceived' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.H2Error' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.NoSuchStreamError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.StreamClosedError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.RFC1122Error' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.ProtocolError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.FrameTooLargeError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.FrameDataMissingError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.TooManyStreamsError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.FlowControlError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.StreamIDTooLowError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.InvalidSettingsValueError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.NoAvailableStreamIDError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.InvalidBodyLengthError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.UnsupportedFrameError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'exceptions.DenialOfServiceError' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import module 'errors' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'settings.SettingCodes' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'settings.Settings' from module 'h2'; the following exception was raised: No module named 'h2' WARNING: autodoc: failed to import class 'settings.ChangedSetting' from module 'h2'; the following exception was raised: No module named 'h2' done checking consistency... done preparing documents... done copying assets... copying static files... done copying extra files... done done writing output... [ 5%] advanced-usage writing output... [ 10%] api writing output... [ 14%] asyncio-example writing output... [ 19%] basic-usage writing output... [ 24%] curio-example writing output... [ 29%] eventlet-example writing output... [ 33%] examples writing output... [ 38%] gevent-example writing output... [ 43%] index writing output... [ 48%] installation writing output... [ 52%] low-level writing output... [ 57%] negotiating-http2 writing output... 
[ 62%] plain-sockets-example writing output... [ 67%] release-notes writing output... [ 71%] release-process writing output... [ 76%] testimonials writing output... [ 81%] tornado-example writing output... [ 86%] twisted-example writing output... [ 90%] twisted-head-example writing output... [ 95%] twisted-post-example writing output... [100%] wsgi-example generating indices... genindex done highlighting module code... writing additional pages... search done copying images... [ 50%] _static/h2.connection.H2ConnectionStateMachine.dot.png copying images... [100%] _static/h2.stream.H2StreamStateMachine.dot.png dumping search index in English (code: en)... done dumping object inventory... done build succeeded, 41 warnings. The HTML pages are in html. + rm -rf html/.doctrees html/.buildinfo + RPM_EC=0 ++ jobs -p + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.8rm7hn + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch '!=' / ']' + rm -rf /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch ++ dirname /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch + mkdir -p /builddir/build/BUILDROOT + mkdir /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd h2-4.1.0 ++ xargs basename --multiple ++ ls /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir/h2-4.1.0-py3-none-any.whl ++ sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/' + specifier=h2==4.1.0 + '[' -z h2==4.1.0 ']' + TMPDIR=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir + /usr/bin/python3 -m pip install --root 
/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch --prefix /usr --no-deps --disable-pip-version-check --progress-bar off --verbose --ignore-installed --no-warn-script-location --no-index --no-cache-dir --find-links /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir h2==4.1.0 Using pip 23.3.2 from /usr/lib/python3.12/site-packages/pip (python 3.12) Looking in links: /builddir/build/BUILD/h2-4.1.0/pyproject-wheeldir Processing ./pyproject-wheeldir/h2-4.1.0-py3-none-any.whl Installing collected packages: h2 Successfully installed h2-4.1.0 + '[' -d /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/bin ']' + rm -f /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-ghost-distinfo + site_dirs=() + '[' -d /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + site_dirs+=("/usr/lib/python3.12/site-packages") + '[' /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib64/python3.12/site-packages '!=' /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + '[' -d /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib64/python3.12/site-packages ']' + for site_dir in ${site_dirs[@]} + for distinfo in /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch$site_dir/*.dist-info + echo '%ghost /usr/lib/python3.12/site-packages/h2-4.1.0.dist-info' + sed -i s/pip/rpm/ /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/INSTALLER + PYTHONPATH=/usr/lib/rpm/redhat + /usr/bin/python3 -B /usr/lib/rpm/redhat/pyproject_preprocess_record.py --buildroot /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch --record /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/RECORD --output /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-record + rm -fv /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/RECORD removed '/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/RECORD' + rm -fv /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/REQUESTED removed '/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages/h2-4.1.0.dist-info/REQUESTED' ++ wc -l /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-ghost-distinfo ++ cut -f1 '-d ' + lines=1 + '[' 1 -ne 1 ']' + RPM_PERCENTAGES_COUNT=2 + /usr/bin/python3 /usr/lib/rpm/redhat/pyproject_save_files.py --output-files /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-files --output-modules /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-modules --buildroot /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch --sitelib /usr/lib/python3.12/site-packages --sitearch /usr/lib64/python3.12/site-packages --python-version 3.12 --pyproject-record /builddir/build/BUILD/python-h2-4.1.0-1.fc40.noarch-pyproject-record --prefix /usr h2 + /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 4.1.0-1.fc40 --unique-debug-suffix -4.1.0-1.fc40.noarch --unique-debug-src-base python-h2-4.1.0-1.fc40.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/h2-4.1.0 find-debuginfo: starting Extracting debug info from 0 files Creating .debug symlinks for symlinks to ELF files find: ‘debug’: No such file or directory find-debuginfo: done + 
/usr/lib/rpm/check-buildroot + /usr/lib/rpm/redhat/brp-ldconfig + /usr/lib/rpm/brp-compress + /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip + /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip + /usr/lib/rpm/check-rpaths + /usr/lib/rpm/redhat/brp-mangle-shebangs + /usr/lib/rpm/brp-remove-la-files + env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8 Bytecompiling .py files below /builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12 using python3.12 + /usr/lib/rpm/redhat/brp-python-hardlink Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.H8ZNRB + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd h2-4.1.0 + TOX_TESTENV_PASSENV='*' + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + PATH=/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin + PYTHONPATH=/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib64/python3.12/site-packages:/builddir/build/BUILDROOT/python-h2-4.1.0-1.fc40.noarch/usr/lib/python3.12/site-packages + PYTHONDONTWRITEBYTECODE=1 + PYTEST_ADDOPTS=' 
--ignore=/builddir/build/BUILD/h2-4.1.0/.pyproject-builddir' + PYTEST_XDIST_AUTO_NUM_WORKERS=8 + HOSTNAME=rpmbuild + /usr/bin/python3 -m tox --current-env -q --recreate -e py312 ============================= test session starts ============================== platform linux -- Python 3.12.0, pytest-7.3.2, pluggy-1.3.0 cachedir: .tox/py312/.pytest_cache rootdir: /builddir/build/BUILD/h2-4.1.0 configfile: setup.cfg testpaths: test plugins: hypothesis-6.82.0 collected 1433 items test/test_basic_logic.py ..........................................F.... [ 3%] ........................................................................ [ 8%] ........................................................................ [ 13%] ........................................................................ [ 18%] ........................................................................ [ 23%] ................................................. [ 26%] test/test_closed_streams.py ..................... [ 28%] test/test_complex_logic.py ................. [ 29%] test/test_config.py .................................................... [ 33%] .............. [ 34%] test/test_events.py F.F......................................... [ 37%] test/test_exceptions.py . [ 37%] test/test_flow_control_window.py ...................................FF.. [ 39%] ...FFF [ 40%] test/test_h2_upgrade.py ............. [ 41%] test/test_head_request.py .. [ 41%] test/test_header_indexing.py ........................................... [ 44%] ........................................................................ [ 49%] ........................................................................ [ 54%] ........................................................................ [ 59%] ......................................... [ 62%] test/test_informational_responses.py .............................. [ 64%] test/test_interacting_stacks.py . [ 64%] test/test_invalid_content_lengths.py ... [ 64%] test/test_invalid_frame_sequences.py ....................... [ 66%] test/test_invalid_headers.py ........................................... [ 69%] ........................................................................ [ 74%] .......FFF..FFF..FFFF....FF...F..FF..F.................................. [ 79%] ........................................................................ [ 84%] ........................................................................ [ 89%] ................................. [ 91%] test/test_priority.py ....................... [ 93%] test/test_related_events.py ............. [ 94%] test/test_rfc7838.py ................... [ 95%] test/test_rfc8441.py . [ 95%] test/test_settings.py .......................F.FFF.F.. [ 97%] test/test_state_machines.py F................. [ 99%] test/test_stream_reset.py ..... [ 99%] test/test_utility_functions.py ........ [100%] =================================== FAILURES =================================== _________________ TestBasicClient.test_changing_max_frame_size _________________ self = data = ConjectureData(INTERESTING, 4 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. 
""" try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 4 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) function = .run at 0xffffff9febeca0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = frame_factory = frame_size = 16384 @given(frame_size=integers(min_value=2**14, max_value=(2**24 - 1))) > @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) test/test_basic_logic.py:793: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, , 16384) kwargs = {}, initial_draws = 1, start = 8258229.516877123, result = None finish = 8258229.864772569, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=347895) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 347.89ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = frame_factory = @given(frame_size=integers(min_value=2**14, max_value=(2**24 - 1))) > @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) test/test_basic_logic.py:793: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 4 bytes, frozen), 
print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 347.89ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_changing_max_frame_size(self=, frame_factory=, frame_size=16384) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_changing_max_frame_size( E self=, E frame_factory=, E frame_size=16384, E ) E Unreliable test timings! On an initial run, this test took 347.89ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 10.42 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
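This first failure sets the pattern for the rest of the report: the initial execution of a Hypothesis example exceeds the default 200 ms deadline on the slow builder, the replay finishes quickly, and Hypothesis raises Flaky rather than a real assertion failure. The message above already names the usual remedy. A short sketch of the two common ways to apply it; the standalone test below is a simplified stand-in for the real method in test/test_basic_logic.py, and the profile name is made up for illustration:

    # Illustrative sketch only -- not the upstream test code.
    from hypothesis import given, settings, strategies as st

    # Per-test: stack @settings with @given and turn the deadline off.
    @given(frame_size=st.integers(min_value=2**14, max_value=2**24 - 1))
    @settings(deadline=None)
    def test_changing_max_frame_size(frame_size):
        ...  # exercise the connection with the generated frame size

    # Suite-wide alternative (e.g. in conftest.py): register a profile for
    # slow or heavily loaded build machines and load it before tests run.
    settings.register_profile("rpmbuild", deadline=None)
    settings.load_profile("rpmbuild")

With deadline=None, a slow first run is no longer escalated to hypothesis.errors.Flaky, which is what turns these timing blips into hard test failures in this log.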
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________ TestRemoteSettingsChanged.test_building_settings_from_scratch _________ self = data = ConjectureData(INTERESTING, 150 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 150 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 150 bytes, frozen) function = .run at 0xffffff9fdf1e40> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 150 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = settings_list = [(59696, 93), (63353, 5), (16541, 233), (21570, 1286096470), (37714, 11399), (7324, 193), ...] 
@given(SETTINGS_STRATEGY) > def test_building_settings_from_scratch(self, settings_list): test/test_events.py:41: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(59696, 93), (63353, 5), (16541, 233), (21570, 1286096470), (37714, 11399), (7324, 193), ...]) kwargs = {}, initial_draws = 1, start = 8258800.888421164, result = None finish = 8258801.267029616, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=378608) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 378.61ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = @given(SETTINGS_STRATEGY) > def test_building_settings_from_scratch(self, settings_list): test/test_events.py:41: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 150 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 378.61ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! 
On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_building_settings_from_scratch(self=, settings_list=[(59696, 93), E (63353, 5), E (16541, 233), E (21570, 1286096470), E (37714, 11399), E (7324, 193), E (1, 229), E (25482, 886944006), E (24602, 12549), E (65444, 2116898219), E (61, 2481181296), E (50613, 77), E (65535, 27291137)]) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_building_settings_from_scratch( E self=, E settings_list=[(59696, 93), E (63353, 5), E (16541, 233), E (21570, 1286096470), E (37714, 11399), E (7324, 193), E (1, 229), E (25482, 886944006), E (24602, 12549), E (65444, 2116898219), E (61, 2481181296), E (50613, 77), E (65535, 27291137)], E ) E Unreliable test timings! On an initial run, this test took 378.61ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.58 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ---------------------------------- Hypothesis ---------------------------------- WARNING: Hypothesis has spent more than five minutes working to shrink a failing example, and stopped because it is making very slow progress. When you re-run your tests, shrinking will resume and may take this long before aborting again. PLEASE REPORT THIS if you can provide a reproducing example, so that we can improve shrinking performance for everyone. ______ TestRemoteSettingsChanged.test_correctly_reports_changed_settings _______ self = data = ConjectureData(INTERESTING, 153 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. 
tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 153 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 153 bytes, frozen) function = .run at 0xffffff9fb387c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 153 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = old_settings_list = [(24196, 487619292), (28795, 110), (46411, 12641), (25942, 42932)] new_settings_list = [(12015, 99), (667, 36529), (33958, 16644), (44481, 46), (1624, 3177539987), (0, 12402), ...] 
@given(SETTINGS_STRATEGY, SETTINGS_STRATEGY) > def test_correctly_reports_changed_settings(self, old_settings_list, new_settings_list): test/test_events.py:77: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(24196, 487619292), (28795, 110), (46411, 12641), (25942, 42932)], [(12015, 99), (667, 36529), (33958, 16644), (44481, 46), (1624, 3177539987), (0, 12402), ...]) kwargs = {}, initial_draws = 2, start = 8258907.508122361, result = None finish = 8258907.826514105, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=318392) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 318.39ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = @given(SETTINGS_STRATEGY, SETTINGS_STRATEGY) > def test_correctly_reports_changed_settings(self, old_settings_list, new_settings_list): test/test_events.py:77: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 153 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 318.39ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. 
if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_correctly_reports_changed_settings(self=, old_settings_list=[(24196, 487619292), (28795, 110), (46411, 12641), (25942, 42932)], new_settings_list=[(12015, 99), E (667, 36529), E (33958, 16644), E (44481, 46), E (1624, 3177539987), E (0, 12402), E (26104, 27687), E (53431, 2747835513), E (24532, 57152)]) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_correctly_reports_changed_settings( E self=, E old_settings_list=[(24196, 487619292), E (28795, 110), E (46411, 12641), E (25942, 42932)], E new_settings_list=[(12015, 99), E (667, 36529), E (33958, 16644), E (44481, 46), E (1624, 3177539987), E (0, 12402), E (26104, 27687), E (53431, 2747835513), E (24532, 57152)], E ) E Unreliable test timings! On an initial run, this test took 318.39ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.31 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky __________ TestAutomaticFlowControl.test_must_acknowledge_for_stream ___________ self = data = ConjectureData(INTERESTING, 4 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. 
tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 4 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) function = .run at 0xffffff9fb3bec0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = frame_factory = stream_id = 0 @given(stream_id=integers(max_value=0)) > @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) test/test_flow_control_window.py:718: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, , 0) kwargs = {}, initial_draws = 1, start = 8258942.747761956, result = None finish = 8258943.032008429, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=284246) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 284.25ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = frame_factory = @given(stream_id=integers(max_value=0)) > @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) test/test_flow_control_window.py:718: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 4 bytes, frozen), print_example = True is_final = True expected_failure = 
(DeadlineExceeded('Test took 284.25ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_must_acknowledge_for_stream(self=, frame_factory=, stream_id=0) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_must_acknowledge_for_stream( E self=, E frame_factory=, E stream_id=0, E ) E Unreliable test timings! On an initial run, this test took 284.25ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 12.02 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______ TestAutomaticFlowControl.test_cannot_acknowledge_less_than_zero ________ self = data = ConjectureData(INTERESTING, 4 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. 
_______ TestAutomaticFlowControl.test_cannot_acknowledge_less_than_zero ________

self =
data = ConjectureData(INTERESTING, 4 bytes, frozen)

>   result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
>   result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
>   return function(data)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
>   return test(*args, **kwargs)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
frame_factory =
size = -22023

    @given(size=integers(max_value=-1))
>   @settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
test/test_flow_control_window.py:744:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (, , -22023), kwargs = {}, initial_draws = 1
start = 8258963.875168335, finish = 8258964.226484224, result = None
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=351316)
current_deadline = datetime.timedelta(microseconds=250000)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 351.32ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 4 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 351.32ms, which exceeds the deadline of 200.00ms'), ...)

>   raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_cannot_acknowledge_less_than_zero(self=, frame_factory=, size=-22023) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_cannot_acknowledge_less_than_zero(
E       self=,
E       frame_factory=,
E       size=-22023,
E   )
E   Unreliable test timings! On an initial run, this test took 351.32ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 8.64 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
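The current_deadline value of 250 000 µs seen in the locals above is not a typo: on non-final runs Hypothesis enforces the configured deadline plus a 25 % grace margin, computed as (current_deadline // 4) * 5 in the wrapper shown in these tracebacks. A small sketch of that arithmetic (the helper name is illustrative):

    # Sketch of the grace-margin arithmetic visible in the locals above;
    # values mirror the log, the helper name effective_deadline is illustrative.
    import datetime

    def effective_deadline(deadline: datetime.timedelta, is_final: bool) -> datetime.timedelta:
        # Only the final replay enforces the exact deadline; earlier runs get
        # a 25% margin, e.g. 200 ms -> 250 ms (= 250000 microseconds).
        return deadline if is_final else (deadline // 4) * 5

    assert effective_deadline(datetime.timedelta(milliseconds=200), is_final=False) \
        == datetime.timedelta(microseconds=250000)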
_ TestAutomaticFlowControl.test_acknowledging_1024_bytes_when_empty_increments _

self =
data = ConjectureData(INTERESTING, 3 bytes, frozen)

>   result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
>   result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
>   return function(data)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
>   return test(*args, **kwargs)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
frame_factory =
increment = 56666

    @given(integers(min_value=1025, max_value=DEFAULT_FLOW_WINDOW))
>   @settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
test/test_flow_control_window.py:842:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (, , 56666), kwargs = {}, initial_draws = 1
start = 8258992.785057194, finish = 8258993.166108492, result = None
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=381051)
current_deadline = datetime.timedelta(microseconds=250000)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 381.05ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 3 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 381.05ms, which exceeds the deadline of 200.00ms'), ...)

>   raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_acknowledging_1024_bytes_when_empty_increments(self=, frame_factory=, increment=56666) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_acknowledging_1024_bytes_when_empty_increments(
E       self=,
E       frame_factory=,
E       increment=56666,
E   )
E   Unreliable test timings! On an initial run, this test took 381.05ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 14.88 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________ TestAutomaticFlowControl.test_connection_only_empty ______________

self =
frame_factory =

    @given(integers(min_value=1025, max_value=(DEFAULT_FLOW_WINDOW // 4) - 1))
>   @settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
test/test_flow_control_window.py:879:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (, , 1025), kwargs = {}, initial_draws = 1
start = 8259020.142929259, finish = 8259020.405371326, result = None
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=262442)
current_deadline = timedelta(milliseconds=200)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 262.44ms, which exceeds the deadline of 200.00ms
E   Falsifying example: test_connection_only_empty(
E       self=,
E       frame_factory=,
E       increment=1025,
E   )
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

______________ TestAutomaticFlowControl.test_mixing_update_forms _______________

self =
frame_factory =

    @given(integers(min_value=1025, max_value=DEFAULT_FLOW_WINDOW))
>   @settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
test/test_flow_control_window.py:923:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (, , 1025), kwargs = {}, initial_draws = 1
start = 8259033.690770408, finish = 8259034.057882802, result = None
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=367112)
current_deadline = timedelta(milliseconds=200)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 367.11ms, which exceeds the deadline of 200.00ms
E   Falsifying example: test_mixing_update_forms(
E       self=,
E       frame_factory=,
E       increment=1025,
E   )
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags0-validate_outbound_headers] _

self =
validation_function =
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=True)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.28 seconds (0 invalid ones and 2 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

test/test_invalid_headers.py:509: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(13200275483355553363540916160501097881) to this test or run pytest with --hypothesis-seed=13200275483355553363540916160501097881 to reproduce this failure.
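The FailedHealthCheck output above names the two knobs that apply here: pinning the generation run with the reported seed, and suppressing just the too_slow health check. A sketch of how they attach to a property test (the @seed value is copied from the reproduction hint above; the strategy and test body are stand-ins, not the actual test in test/test_invalid_headers.py):

    # Sketch only: reproducing a slow-generation failure and disabling the
    # too_slow health check.  @seed and HealthCheck.too_slow are real
    # Hypothesis APIs; the strategy and assertion are placeholders.
    from hypothesis import HealthCheck, given, seed, settings
    from hypothesis.strategies import binary, lists, tuples

    @seed(13200275483355553363540916160501097881)   # value copied from the log's hint
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(lists(tuples(binary(), binary()), max_size=16))
    def test_headers_generation_sketch(headers):
        assert all(len(pair) == 2 for pair in headers)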
_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags1-validate_headers] _

self =
data = ConjectureData(INTERESTING, 184 bytes, frozen)

>   result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
>   result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
>   return function(data)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
>   return test(*args, **kwargs)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
headers = [(b'\xc8\xff\xec', b'\xff\xcf\xc7\x0f-\x80Z\xef\xfe\xd3B\x08\xa1\x80'), (b'\xd9B\t\x1c\t\xc4\xcc\xe2\x91\x01\x02', b'\xe8\xba'), (b'>\xdf\xeb\x04b%\xb6N\x92QU7F\n', b'')]
validation_function =
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1
start = 8259295.094930079, finish = 8259295.433710002, result = None
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=338780)
current_deadline = datetime.timedelta(microseconds=250000)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 338.78ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 184 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 338.78ms, which exceeds the deadline of 200.00ms'), ...)

>   raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\xc8\xff\xec', b'\xff\xcf\xc7\x0f-\x80Z\xef\xfe\xd3B\x08\xa1\x80'), (b'\xd9B\t\x1c\t\xc4\xcc\xe2\x91\x01\x02', b'\xe8\xba'), (b'>\xdf\xeb\x04b%\xb6N\x92QU7F\n', b'')], validation_function=validate_headers, hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False)) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=,
E       validation_function=validate_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False),
E       headers=[(b'\xc8\xff\xec',
E                 b'\xff\xcf\xc7\x0f-\x80Z\xef\xfe\xd3B\x08\xa1\x80'),
E                (b'\xd9B\t\x1c\t\xc4\xcc\xe2\x91\x01\x02', b'\xe8\xba'),
E                (b'>\xdf\xeb\x04b%\xb6N\x92QU7F\n', b'')],
E   )
E   Unreliable test timings! On an initial run, this test took 338.78ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.66 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky

_ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags1-validate_outbound_headers] _

self =
data = ConjectureData(INTERESTING, 312 bytes, frozen)

>   result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
>   result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
>   return function(data)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
>   return test(*args, **kwargs)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
headers = [(b'G\x01@\xf1\xdd', b'a%\x07\n\xd8\x07*\xffq\xff\xc7\x9dw-'), (b';F\x9e\xa69\xec\x01>Jn7y\x9bL\x9dO\x01y\xda\x829\xb7M', b'\x83\xcb'), (b'4\x9a\xfd\xfd', b'\xf2Z\r\xe4\x11\x04WL\xa5^\xf1\xf6\xffv\x83\xa0\xc6\x81\xe7QM\x1b\xea\xef\x88(\xe7'), (b'\x1c', b'')]
validation_function =
hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False)

    @pytest.mark.parametrize('validation_function', validation_functions)
>   @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
test/test_invalid_headers.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

kwargs = {}, initial_draws = 1
start = 8259366.141653737, finish = 8259366.433382272, result = None
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=291729)
current_deadline = datetime.timedelta(microseconds=250000)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 291.73ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 312 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 291.73ms, which exceeds the deadline of 200.00ms'), ...)

>   raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[...], validation_function=validate_outbound_headers, hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False)) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_range_of_acceptable_outputs(
E       self=,
E       validation_function=validate_outbound_headers,
E       hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=True, is_push_promise=False),
E       headers=[(b'G\x01@\xf1\xdd', b'a%\x07\n\xd8\x07*\xffq\xff\xc7\x9dw-'),
E                (b';F\x9e\xa69\xec\x01>Jn7y\x9bL\x9dO\x01y\xda\x829\xb7M', b'\x83\xcb'),
E                (b'4\x9a\xfd\xfd',
E                 b'\xf2Z\r\xe4\x11\x04WL\xa5^\xf1\xf6\xffv\x83\xa0\xc6\x81\xe7QM\x1b\xea\xef\x88(\xe7'),
E                (b'\x1c', b'')],
E   )
E   Unreliable test timings! On an initial run, this test took 291.73ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.38 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 735 bytes, frozen) function = .run at 0xffffffa004ee80> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 735 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'\x01\x01\x07\x06P\xb2', b''), (b'\x7f\xbc\\\x11G\xd8d\x83', b'\x0eL\xc0Z1m,\x03\xdb*\x9f]\x06\xff\xd9(mw\xff\xfe\x...), (b'4\xb5', b'\xde'), (b'w\xa0 \xaf', b'\xe0\xf6\xc4\xd3\xf9\xcc\xad'), (b'\xf4\x94\xf3O\x00\xff', b'\xc0\x86'), ...] 
validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=False, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'\x01\x01\x07\x06P\xb2', b''), (b'\x7f\xbc\\\x1...ffffa0ddbce0>, HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=False, is_push_promise=False)) kwargs = {}, initial_draws = 1, start = 8259613.90805921, result = None finish = 8259614.234208149, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=326149) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 326.15ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=False, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 735 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 326.15ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\x01\x01\x07\x06P\xb2', b''), E (b'\x7f\xbc\\\x11G\xd8d\x83', E b'\x0eL\xc0Z1m,\x03\xdb*\x9f]\x06\xff\xd9(mw\xff\xfe\x02\xf7'), E (b't\n\x8a\xe93\x15[\xbb\x8b\x81', b'0'), E (b'4\xb5', b'\xde'), E (b'w\xa0 \xaf', b'\xe0\xf6\xc4\xd3\xf9\xcc\xad'), E (b'\xf4\x94\xf3O\x00\xff', b'\xc0\x86'), E (b'\xe7\xa5', b's\x18%p'), E (b']\x1b\x18p\xf4\xde\xee-', b'\xc8'), E (b'\xfa\xc3v\x7f\x9f\xeaR\xa0w', b'c\xacr\x8aZ\x9a7'), E (b'\xfc\x80', b''), E (b'\x14', b'4F_T\x0c'), E (b'k\x00', b'\x87\x01\xbf'), E (b'\xa9\xb1\xe9\x00', E b'\xce\x17\x07,"\xcc\xf1\x15\x0f\x01\xd1\xadC\xcf\xe4\x95'), E (b'\xfe~\x05Q\x9c', b''), E (b'\x82M\x0b\x01', b'\xff\xe0\xc4\xbd\xb3\x19\x8c'), E (b'\xae\xc8)\xa2\xfd8\xc5\xba\x18\x00\x01\xd9;\xf2\xe5\xf5\xd3j', b"'\x00")], validation_function=validate_headers, hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=False, is_push_promise=False)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=False, is_push_promise=False), E headers=[(b'\x01\x01\x07\x06P\xb2', b''), E (b'\x7f\xbc\\\x11G\xd8d\x83', E b'\x0eL\xc0Z1m,\x03\xdb*\x9f]\x06\xff\xd9(mw\xff\xfe\x02\xf7'), E (b't\n\x8a\xe93\x15[\xbb\x8b\x81', b'0'), E (b'4\xb5', b'\xde'), E (b'w\xa0 \xaf', b'\xe0\xf6\xc4\xd3\xf9\xcc\xad'), E (b'\xf4\x94\xf3O\x00\xff', b'\xc0\x86'), E (b'\xe7\xa5', b's\x18%p'), E (b']\x1b\x18p\xf4\xde\xee-', b'\xc8'), E (b'\xfa\xc3v\x7f\x9f\xeaR\xa0w', b'c\xacr\x8aZ\x9a7'), E (b'\xfc\x80', b''), E (b'\x14', b'4F_T\x0c'), E (b'k\x00', b'\x87\x01\xbf'), E (b'\xa9\xb1\xe9\x00', E b'\xce\x17\x07,"\xcc\xf1\x15\x0f\x01\xd1\xadC\xcf\xe4\x95'), E (b'\xfe~\x05Q\x9c', b''), E (b'\x82M\x0b\x01', b'\xff\xe0\xc4\xbd\xb3\x19\x8c'), E (b'\xae\xc8)\xa2\xfd8\xc5\xba\x18\x00\x01\xd9;\xf2\xe5\xf5\xd3j', E b"'\x00")], E ) E Unreliable test timings! On an initial run, this test took 326.15ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags3-validate_outbound_headers] _ self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=True, is_response_header=False, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.07 seconds (0 invalid ones and 3 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. 
test/test_invalid_headers.py:509: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(28241472988655029495394255881662977677) to this test or run pytest with --hypothesis-seed=28241472988655029495394255881662977677 to reproduce this failure. _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags4-validate_headers] _ self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.30 seconds (0 invalid ones and 3 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. test/test_invalid_headers.py:509: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(83524312649842050380755822518993535313) to this test or run pytest with --hypothesis-seed=83524312649842050380755822518993535313 to reproduce this failure. _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags5-validate_outbound_headers] _ self = data = ConjectureData(INTERESTING, 215 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 215 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 215 bytes, frozen) function = .run at 0xffffff9fc0fba0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 215 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'\x01\x01\x01\x035\x01\x01\x01\x01\x00A%8', b'\x05'), (b'\x00\x1b$\xf37\xff\xec\xdf\xbb\x92R\x92\x00\xd0\x0c(\xec', b''), (b'u\xec\xae\x91\xd4\xeeo,', b'\xf1\xcc\x90iU@\xbe\x0b\x15\x96\x96\xb9\xbdN')] validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'\x01\x01\x01\x035\x01\x01\x01\x01\x00A%8', b'\...ffffa0dfc9a0>, HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=False)) kwargs = {}, initial_draws = 1, start = 8259838.978742501, result = None finish = 8259839.240657271, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=261915) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 261.92ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 215 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 261.92ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise DeadlineExceeded(runtime, 
self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\x01\x01\x01\x035\x01\x01\x01\x01\x00A%8', b'\x05'), E (b'\x00\x1b$\xf37\xff\xec\xdf\xbb\x92R\x92\x00\xd0\x0c(\xec', b''), E (b'u\xec\xae\x91\xd4\xeeo,', b'\xf1\xcc\x90iU@\xbe\x0b\x15\x96\x96\xb9\xbdN')], validation_function=validate_outbound_headers, hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=False)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_outbound_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=True, is_push_promise=False), E headers=[(b'\x01\x01\x01\x035\x01\x01\x01\x01\x00A%8', b'\x05'), E (b'\x00\x1b$\xf37\xff\xec\xdf\xbb\x92R\x92\x00\xd0\x0c(\xec', b''), E (b'u\xec\xae\x91\xd4\xeeo,', E b'\xf1\xcc\x90iU@\xbe\x0b\x15\x96\x96\xb9\xbdN')], E ) E Unreliable test timings! On an initial run, this test took 261.92ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.69 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags6-validate_headers] _ self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.55 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. test/test_invalid_headers.py:509: FailedHealthCheck _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags6-validate_outbound_headers] _ self = data = ConjectureData(INTERESTING, 7 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 7 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 7 bytes, frozen) function = .run at 0xffffff9fd61d00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 7 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'\x00', b'')] validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'\x00', b'')], , HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=True)) kwargs = {}, initial_draws = 1, start = 8259906.927807996, result = None finish = 8259907.395198465, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=467390) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 467.39ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 7 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 467.39ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. 
with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\x00', b'')], validation_function=validate_outbound_headers, hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=True)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_outbound_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=True), E headers=[(b'\x00', b'')], E ) E Unreliable test timings! On an initial run, this test took 467.39ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.63 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
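The Flaky report above ends with Hypothesis's own suggestion, deadline=None. A per-test sketch of that suggestion follows; the strategy and bare signature are placeholders, not the ones actually used at test/test_invalid_headers.py:509.

    # Sketch of the per-test fix suggested in the message above. Only the
    # deadline=None override is the point being illustrated; the strategy is
    # a stand-in.
    from hypothesis import given, settings, strategies as st

    @settings(deadline=None)   # timing varies on busy builders, so drop the deadline
    @given(headers=st.lists(st.tuples(st.binary(), st.binary())))
    def test_range_of_acceptable_outputs(headers):
        ...  # the real assertions live in the h2 test suite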
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags7-validate_headers] _ self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=True, is_trailer=False, is_response_header=False, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.20 seconds (0 invalid ones and 2 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. test/test_invalid_headers.py:509: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(832017088914118332784981997637454) to this test or run pytest with --hypothesis-seed=832017088914118332784981997637454 to reproduce this failure. _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags9-validate_outbound_headers] _ self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=True, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.19 seconds (0 invalid ones and 3 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. test/test_invalid_headers.py:509: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(176907577967748260403220445938614070976) to this test or run pytest with --hypothesis-seed=176907577967748260403220445938614070976 to reproduce this failure. _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags10-validate_headers] _ self = data = ConjectureData(INTERESTING, 64 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. 
tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 64 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 64 bytes, frozen) function = .run at 0xffffff9ffef4c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 64 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'*\x903:\x91\n\x8f', b'\xcc\xac+\xf6\x89\xcb\xa3\x88')] validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=False, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'*\x903:\x91\n\x8f', b'\xcc\xac+\xf6\x89\xcb\xa...ffffa0ddbce0>, HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=False, is_push_promise=True)) kwargs = {}, initial_draws = 1, start = 8260328.35417731, result = None finish = 8260328.650988826, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=296812) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 296.81ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the 
following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=False, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 64 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 296.81ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'*\x903:\x91\n\x8f', b'\xcc\xac+\xf6\x89\xcb\xa3\x88')], validation_function=validate_headers, hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=False, is_push_promise=True)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=True, is_response_header=False, is_push_promise=True), E headers=[(b'*\x903:\x91\n\x8f', b'\xcc\xac+\xf6\x89\xcb\xa3\x88')], E ) E Unreliable test timings! On an initial run, this test took 296.81ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.22 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags12-validate_headers] _ self = data = ConjectureData(INTERESTING, 260 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 260 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 260 bytes, frozen) function = .run at 0xffffff9fdf3e20> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 260 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'\x1b', b'\x03\x86J\xcd\xcd\xfc\xf0IE&}\x10\x9f)'), (b'\xc9/\xfb\x82\x1cH+\xef\xa2\xb6', b''), (b'2\x92\xbdn\xfb\x16\x1e\x9e\x01r\xcf\x84\x96\x9c', b'\xc0D\xca\xea\xc9\xfe\x87{: <\xb2>\xa3\xaa'), (b'\xff\x950\xb2', b'\xf6')] validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'\x1b', b'\x03\x86J\xcd\xcd\xfc\xf0IE&}\x10\x9f...ffffa0ddbce0>, HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True)) kwargs = {}, initial_draws = 1, start = 8260646.167997778, result = None finish = 8260646.457406697, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=289409) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 289.41ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 260 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 289.41ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise 
DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\x1b', b'\x03\x86J\xcd\xcd\xfc\xf0IE&}\x10\x9f)'), E (b'\xc9/\xfb\x82\x1cH+\xef\xa2\xb6', b''), E (b'2\x92\xbdn\xfb\x16\x1e\x9e\x01r\xcf\x84\x96\x9c', E b'\xc0D\xca\xea\xc9\xfe\x87{: <\xb2>\xa3\xaa'), E (b'\xff\x950\xb2', b'\xf6')], validation_function=validate_headers, hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=True), E headers=[(b'\x1b', b'\x03\x86J\xcd\xcd\xfc\xf0IE&}\x10\x9f)'), E (b'\xc9/\xfb\x82\x1cH+\xef\xa2\xb6', b''), E (b'2\x92\xbdn\xfb\x16\x1e\x9e\x01r\xcf\x84\x96\x9c', E b'\xc0D\xca\xea\xc9\xfe\x87{: <\xb2>\xa3\xaa'), E (b'\xff\x950\xb2', b'\xf6')], E ) E Unreliable test timings! 
On an initial run, this test took 289.41ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.58 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags13-validate_outbound_headers] _ self = data = ConjectureData(INTERESTING, 209 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 209 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
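The wrapper shown in these frames also explains the 250000-microsecond figure that appears alongside the 200 ms deadline: on non-final runs Hypothesis enforces (deadline // 4) * 5, i.e. 25% headroom over the configured deadline, so a run is only flagged once it exceeds 250 ms. A minimal sketch of that arithmetic, using the values reported in the first failure above (variable names are illustrative):

    import datetime

    # Deadline as reported in the DeadlineExceeded messages above.
    deadline = datetime.timedelta(milliseconds=200)

    # Non-final runs get 25% headroom: (200 ms // 4) * 5 == 250 ms.
    non_final_deadline = (deadline // 4) * 5
    assert non_final_deadline == datetime.timedelta(microseconds=250000)

    # The 289.41 ms run above exceeds both the enforced 250 ms limit
    # and the configured 200 ms deadline quoted in the exception.
    runtime = datetime.timedelta(microseconds=289409)
    assert runtime >= non_final_deadline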
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 209 bytes, frozen) function = .run at 0xffffff9f5f8ae0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 209 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'\x01W\x1f\xcf\xb6x\x9a', b'-i\xde'), (b'O', b'\x12'), (b'\x12', b'\xfd'), (b'\xef;', b'\xeaV\xf0U_a\xf5f\xffS\x0b\xe7\xe8\x9b}\xc8`\xd3\xa6\xbe\xf6'), (b'\xe2q\x00\xf3\x90^\t\xa6\xd9', b'\xa3')] validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'\x01W\x1f\xcf\xb6x\x9a', b'-i\xde'), (b'O', b'...fffa0dfc9a0>, HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False)) kwargs = {}, initial_draws = 1, start = 8260903.371218195, result = None finish = 8260903.662483214, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=291265) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 291.26ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 209 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 291.26ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise DeadlineExceeded(runtime, 
self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\x01W\x1f\xcf\xb6x\x9a', b'-i\xde'), E (b'O', b'\x12'), E (b'\x12', b'\xfd'), E (b'\xef;', b'\xeaV\xf0U_a\xf5f\xffS\x0b\xe7\xe8\x9b}\xc8`\xd3\xa6\xbe\xf6'), E (b'\xe2q\x00\xf3\x90^\t\xa6\xd9', b'\xa3')], validation_function=validate_outbound_headers, hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_outbound_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=True, is_push_promise=False), E headers=[(b'\x01W\x1f\xcf\xb6x\x9a', b'-i\xde'), E (b'O', b'\x12'), E (b'\x12', b'\xfd'), E (b'\xef;', E b'\xeaV\xf0U_a\xf5f\xffS\x0b\xe7\xe8\x9b}\xc8`\xd3\xa6\xbe\xf6'), E (b'\xe2q\x00\xf3\x90^\t\xa6\xd9', b'\xa3')], E ) E Unreliable test timings! On an initial run, this test took 291.26ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.75 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags14-validate_headers] _ self = data = ConjectureData(INTERESTING, 199 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 199 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
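The deadline check repeated in these tracebacks measures wall-clock time with time.perf_counter() and subtracts the time Hypothesis itself spent drawing example data, so only the test body is charged against the deadline. A simplified, illustrative sketch of that pattern (not the library's actual code; names are made up for the sketch):

    import datetime
    import time

    def run_with_deadline(test, deadline, draw_times):
        # Time spent generating inputs is excluded from the budget,
        # mirroring the internal_draw_time subtraction shown above.
        start = time.perf_counter()
        result = test()
        finish = time.perf_counter()
        runtime = datetime.timedelta(seconds=finish - start - sum(draw_times))
        if runtime >= deadline:
            raise RuntimeError(f"Test took {runtime}, exceeding {deadline}")
        return result

    # Example usage: a cheap body comfortably inside a 200 ms budget.
    run_with_deadline(lambda: sum(range(1000)),
                      datetime.timedelta(milliseconds=200), [])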
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 199 bytes, frozen) function = .run at 0xffffff9f1aeb60> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 199 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'/c\xfb\xe9I', b'~'), (b'a\xa8o\x03', b'\xda'), (b'xw\xd4\x97\xc2\xe1\xa5\xe5', b''), (b'\xea\xfa\x85d', b'\x01.\xb7\x00Y\xd3\xec'), (b'\xb0\x00\xb0?V\xb1\xbeI\x1d', b'\xd3\xa6\xf9\xec\xe9\xff\xa8\xe3')] validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'/c\xfb\xe9I', b'~'), (b'a\xa8o\x03', b'\xda'),...fffa0ddbce0>, HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=True)) kwargs = {}, initial_draws = 1, start = 8260988.765000611, result = None finish = 8260989.062459431, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=297459) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 297.46ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=True) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 199 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 297.46ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise DeadlineExceeded(runtime, 
self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'/c\xfb\xe9I', b'~'), E (b'a\xa8o\x03', b'\xda'), E (b'xw\xd4\x97\xc2\xe1\xa5\xe5', b''), E (b'\xea\xfa\x85d', b'\x01.\xb7\x00Y\xd3\xec'), E (b'\xb0\x00\xb0?V\xb1\xbeI\x1d', b'\xd3\xa6\xf9\xec\xe9\xff\xa8\xe3')], validation_function=validate_headers, hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=True)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=True), E headers=[(b'/c\xfb\xe9I', b'~'), E (b'a\xa8o\x03', b'\xda'), E (b'xw\xd4\x97\xc2\xe1\xa5\xe5', b''), E (b'\xea\xfa\x85d', b'\x01.\xb7\x00Y\xd3\xec'), E (b'\xb0\x00\xb0?V\xb1\xbeI\x1d', b'\xd3\xa6\xf9\xec\xe9\xff\xa8\xe3')], E ) E Unreliable test timings! On an initial run, this test took 297.46ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.90 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _ TestFilter.test_range_of_acceptable_outputs[hdr_validation_flags15-validate_outbound_headers] _ self = data = ConjectureData(INTERESTING, 86 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 86 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
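When Hypothesis reports Flaky because the shrunk example no longer reproduces the original failure, it can help to have it print a reproduction blob so the exact ConjectureData buffer (e.g. the 199-byte example above) can be replayed later. A hedged sketch using the public settings API; the profile name and where it is loaded are illustrative choices:

    from hypothesis import settings

    # Print an @reproduce_failure(...) decorator whenever a test fails,
    # so the failing buffer can be replayed deterministically.
    settings.register_profile("debug", print_blob=True)
    settings.load_profile("debug")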
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 86 bytes, frozen) function = .run at 0xffffff9ffef240> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 86 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = headers = [(b'\x01\x04\x97', b'\x17\xb9\x0e\xce'), (b'\x04*\x01\x01\x03\xb8>\xd6\xc9\x9fj\xbb', b'\x07\x97')] validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, [(b'\x01\x04\x97', b'\x17\xb9\x0e\xce'), (b'\x04*\...ffa0dfc9a0>, HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=False)) kwargs = {}, initial_draws = 1, start = 8261221.007492338, result = None finish = 8261221.269926, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=262434) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 262.43ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = validation_function = hdr_validation_flags = HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=False) @pytest.mark.parametrize('validation_function', validation_functions) > @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) test/test_invalid_headers.py:509: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 86 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 262.43ms, which exceeds the deadline of 200.00ms'), "args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. 
with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_range_of_acceptable_outputs(self=, headers=[(b'\x01\x04\x97', b'\x17\xb9\x0e\xce'), E (b'\x04*\x01\x01\x03\xb8>\xd6\xc9\x9fj\xbb', b'\x07\x97')], validation_function=validate_outbound_headers, hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=False)) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_range_of_acceptable_outputs( E self=, E validation_function=validate_outbound_headers, E hdr_validation_flags=HeaderValidationFlags(is_client=False, is_trailer=False, is_response_header=False, is_push_promise=False), E headers=[(b'\x01\x04\x97', b'\x17\xb9\x0e\xce'), E (b'\x04*\x01\x01\x03\xb8>\xd6\xc9\x9fj\xbb', b'\x07\x97')], E ) E Unreliable test timings! On an initial run, this test took 262.43ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.42 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
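As the failure message above suggests, the usual remedy for deadline-induced Flaky errors on slow or heavily loaded builders is to relax or disable the Hypothesis deadline rather than change the tests themselves. A hedged sketch of the two common forms (a per-test decorator, or a project-wide profile typically registered in conftest.py; the profile name is illustrative):

    from hypothesis import given, settings, strategies as st

    # Per-test: turn the deadline off for a single property test.
    @settings(deadline=None)
    @given(st.integers())
    def test_example_property(val):
        assert isinstance(val, int)

    # Project-wide: register a profile once and select it in the
    # build environment so all tests run without a deadline.
    settings.register_profile("rpmbuild", deadline=None)
    settings.load_profile("rpmbuild")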
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________ TestSettings.test_cannot_set_invalid_values_for_enable_push __________ self = data = ConjectureData(INTERESTING, 3 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) function = .run at 0xffffff9f716200> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , val = -89 @given(integers()) > def test_cannot_set_invalid_values_for_enable_push(self, val): test/test_settings.py:271: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, -89) kwargs = {}, initial_draws = 1, start = 8261331.736858891, result = None finish = 8261332.072942971, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=336084) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 336.08ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = @given(integers()) > def test_cannot_set_invalid_values_for_enable_push(self, val): test/test_settings.py:271: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 3 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 336.08ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_cannot_set_invalid_values_for_enable_push(self=, val=-89) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_cannot_set_invalid_values_for_enable_push( E self=, E val=-89, E ) E Unreliable test timings! On an initial run, this test took 336.08ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.98 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________ TestSettings.test_cannot_set_invalid_values_for_max_frame_size ________ self = data = ConjectureData(INTERESTING, 4 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. 
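The TestSettings failures above are ordinary property tests over settings validation that fail only on timing, not on behaviour. A hedged, self-contained sketch of the general shape of such a test; a toy validator stands in for h2's Settings object, since neither the real exception type nor the real assertion appears in this log:

    import pytest
    from hypothesis import given, strategies as st

    def set_enable_push(value):
        # Stand-in for HTTP/2 settings validation: ENABLE_PUSH must be 0 or 1.
        if value not in (0, 1):
            raise ValueError(f"Invalid value for ENABLE_PUSH: {value}")
        return value

    @given(st.integers().filter(lambda v: v not in (0, 1)))
    def test_rejects_invalid_enable_push(val):
        # Any other integer should be rejected by the validator.
        with pytest.raises(ValueError):
            set_enable_push(val)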
tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 4 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) function = .run at 0xffffff9f17c360> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , val = -8525 @given(integers()) > def test_cannot_set_invalid_values_for_max_frame_size(self, val): test/test_settings.py:324: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, -8525) kwargs = {}, initial_draws = 1, start = 8261359.392211548, result = None finish = 8261359.675004615, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=282793) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 282.79ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = @given(integers()) > def test_cannot_set_invalid_values_for_max_frame_size(self, val): test/test_settings.py:324: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 4 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 282.79ms, which exceeds the deadline of 200.00ms'), 'args = (= 
current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_cannot_set_invalid_values_for_max_frame_size(self=, val=-8525) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_cannot_set_invalid_values_for_max_frame_size( E self=, E val=-8525, E ) E Unreliable test timings! On an initial run, this test took 282.79ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.03 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____ TestSettings.test_cannot_set_invalid_values_for_max_header_list_size _____ self = data = ConjectureData(INTERESTING, 4 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. 
""" try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 4 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) function = .run at 0xffffff9f17c0e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = val = -26537 @given(integers()) > def test_cannot_set_invalid_values_for_max_header_list_size(self, val): test/test_settings.py:350: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, -26537) kwargs = {}, initial_draws = 1, start = 8261383.578786483, result = None finish = 8261383.869451052, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=290665) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 290.67ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = @given(integers()) > def test_cannot_set_invalid_values_for_max_header_list_size(self, val): test/test_settings.py:350: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 4 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 290.67ms, which exceeds the deadline of 200.00ms'), 'args = (= 
current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_cannot_set_invalid_values_for_max_header_list_size(self=, val=-26537) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_cannot_set_invalid_values_for_max_header_list_size( E self=, E val=-26537, E ) E Unreliable test timings! On an initial run, this test took 290.67ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.67 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___ TestSettings.test_cannot_set_invalid_values_for_enable_connect_protocol ____ self = data = ConjectureData(INTERESTING, 3 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. 
""" try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) function = .run at 0xffffff9f705d00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = , val = -32 @given(integers()) > def test_cannot_set_invalid_values_for_enable_connect_protocol(self, val): test/test_settings.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, -32) kwargs = {}, initial_draws = 1, start = 8261402.401667445, result = None finish = 8261403.072437205, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=670770) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 670.77ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = @given(integers()) > def test_cannot_set_invalid_values_for_enable_connect_protocol(self, val): test/test_settings.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 3 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 670.77ms, which exceeds the deadline of 200.00ms'), 'args = 
(= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_cannot_set_invalid_values_for_enable_connect_protocol(self=, val=-32) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_cannot_set_invalid_values_for_enable_connect_protocol( E self=, E val=-32, E ) E Unreliable test timings! On an initial run, this test took 670.77ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.97 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________ TestSettingsEquality.test_equality_multiple __________________ self = @given(settings=SettingsStrategy, o_settings=SettingsStrategy) > def test_equality_multiple(self, settings, o_settings): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.22 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). 
E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. test/test_settings.py:433: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(119392382240538707386592795461102969368) to this test or run pytest with --hypothesis-seed=119392382240538707386592795461102969368 to reproduce this failure. ______________ TestConnectionStateMachine.test_state_transitions _______________ self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) function = .run at 0xffffff9fd63c40> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = state = , input_ = @given(state=sampled_from(h2.connection.ConnectionState), > input_=sampled_from(h2.connection.ConnectionInputs)) test/test_state_machines.py:25: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (, , ) kwargs = {}, initial_draws = 2, start = 8261573.744533969, result = None finish = 8261574.087552651, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=343019) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 343.02ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = @given(state=sampled_from(h2.connection.ConnectionState), > input_=sampled_from(h2.connection.ConnectionInputs)) test/test_state_machines.py:25: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 343.02ms, which exceeds the deadline of 200.00ms'), 'args = (= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_state_transitions(self=, state=ConnectionState.CLOSED, input_=ConnectionInputs.RECV_PING) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_state_transitions( E self=, E state=ConnectionState.CLOSED, E input_=ConnectionInputs.RECV_PING, E ) E Unreliable test timings! On an initial run, this test took 343.02ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.26 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
=========================== short test summary info ============================
FAILED test/test_basic_logic.py::TestBasicClient::test_changing_max_frame_size
FAILED test/test_events.py::TestRemoteSettingsChanged::test_building_settings_from_scratch
FAILED test/test_events.py::TestRemoteSettingsChanged::test_correctly_reports_changed_settings
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_must_acknowledge_for_stream
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_cannot_acknowledge_less_than_zero
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_acknowledging_1024_bytes_when_empty_increments
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_connection_only_empty
FAILED test/test_flow_control_window.py::TestAutomaticFlowControl::test_mixing_update_forms
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags0-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags1-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags1-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags3-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags3-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags4-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags5-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags6-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags6-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags7-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags9-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags10-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags12-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags13-validate_outbound_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags14-validate_headers]
FAILED test/test_invalid_headers.py::TestFilter::test_range_of_acceptable_outputs[hdr_validation_flags15-validate_outbound_headers]
FAILED test/test_settings.py::TestSettings::test_cannot_set_invalid_values_for_enable_push
FAILED test/test_settings.py::TestSettings::test_cannot_set_invalid_values_for_max_frame_size
FAILED test/test_settings.py::TestSettings::test_cannot_set_invalid_values_for_max_header_list_size
FAILED test/test_settings.py::TestSettings::test_cannot_set_invalid_values_for_enable_connect_protocol
FAILED test/test_settings.py::TestSettingsEquality::test_equality_multiple - ...
FAILED test/test_state_machines.py::TestConnectionStateMachine::test_state_transitions
================= 30 failed, 1403 passed in 3769.02s (1:02:49) =================
py312: exit 1 (3833.82 seconds) /builddir/build/BUILD/h2-4.1.0> pytest pid=245034
  py312: FAIL code 1 (3838.08 seconds)
  evaluation failed :( (3853.73 seconds)
RPM build errors:
error: Bad exit status from /var/tmp/rpm-tmp.H8ZNRB (%check)
    Bad exit status from /var/tmp/rpm-tmp.H8ZNRB (%check)
Child return code was: 1
EXCEPTION: [Error('Command failed: \n # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec\n', 1)]
Traceback (most recent call last):
  File "/usr/lib/python3.11/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
             ^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mockbuild/util.py", line 597, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed: 
 # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-h2.spec
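
Note on the failures above: the Flaky/DeadlineExceeded errors and the too_slow health check all come from Hypothesis's default 200 ms per-example deadline and default health checks being tripped on a slow, heavily loaded builder, not from defects in h2 itself. As the error messages themselves suggest, the limits can be relaxed for the whole run. The sketch below is only an illustration of that suggestion; it is not part of the h2 sources or the Fedora spec, and the conftest.py location and the profile name "ci" are assumptions.

    # conftest.py -- illustrative sketch only; not taken from h2 or the Fedora packaging.
    # Registers and selects a Hypothesis settings profile that disables the per-example
    # deadline and the "too slow" health check, as the failure messages above recommend.
    from hypothesis import HealthCheck, settings

    settings.register_profile(
        "ci",                                          # hypothetical profile name
        deadline=None,                                 # no per-example time limit
        suppress_health_check=[HealthCheck.too_slow],  # tolerate slow example generation
    )
    settings.load_profile("ci")

The same relaxation can instead be applied per test with @settings(deadline=None) on the affected @given tests, or a registered profile can be selected at run time via pytest's --hypothesis-profile option; an individual failure such as test_equality_multiple can first be replayed with the --hypothesis-seed value printed earlier in the log.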