Mock Version: 5.0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2401596-60555/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1707868800
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.src.rpm
Child return code was: 0
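The SRPM pass above exits cleanly (return code 0), and every rebuild below repeats `setting SOURCE_DATE_EPOCH=1707868800`: rpmbuild clamps file timestamps to that epoch so the package builds reproducibly, and in Fedora the value is typically derived from the date of the newest %changelog entry. A quick standard-library check of what the epoch corresponds to:

```python
from datetime import datetime, timezone

# SOURCE_DATE_EPOCH as printed by rpmbuild in the log above
epoch = 1707868800
print(datetime.fromtimestamp(epoch, tz=timezone.utc))
# 2024-02-14 00:00:00+00:00
```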
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2401596-60555/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1707868800
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.r91Y1X
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.khuyix
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement not satisfied: setuptools >= 40.8
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
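The generator run above stops early at `Exiting dependency generation pass: build backend`: setuptools and wheel are not installed in the still-minimal buildroot, so nothing beyond the build backend's own requirements can be inspected yet. The unsatisfied requirements are written to the `-pyproject-buildrequires` file (the `cat` above) in RPM's `python3dist()` namespace; a second invocation with the spec's full extras list follows below and hits the same wall. A rough sketch of that name mapping, assuming the `packaging` library (the helper is illustrative, not the actual pyproject_buildrequires code):

```python
from packaging.requirements import Requirement

def to_rpm_deps(pep508: str) -> list[str]:
    """Illustrative only: render a PEP 508 requirement as RPM dependency strings."""
    req = Requirement(pep508)
    if not req.specifier:
        return [f"python3dist({req.name.lower()})"]
    # one RPM constraint per version clause
    return [f"python3dist({req.name.lower()}) {s.operator} {s.version}"
            for s in req.specifier]

print(to_rpm_deps("setuptools >= 40.8"))  # ['python3dist(setuptools) >= 40.8']
print(to_rpm_deps("wheel"))               # ['python3dist(wheel)']
```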
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement not satisfied: setuptools >= 40.8
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
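`Child return code was: 11` is not a failure here: `rpmbuild -br` stops after %generate_buildrequires by design, and exit status 11 signals that the freshly written `*.buildreqs.nosrc.rpm` names BuildRequires that are not yet present in the buildroot. Mock reacts by installing them (`Going to install missing buildrequires`, detailed in root.log) and re-running the same command until no new dependencies turn up. A minimal sketch of that driver loop, with a hypothetical `install_buildrequires()` standing in for mock's dependency installation step:

```python
import subprocess

CMD = ["rpmbuild", "-br", "--noclean", "--target", "noarch", "--nodeps",
       "/builddir/build/SPECS/python-textdistance.spec"]

def install_buildrequires() -> None:
    """Hypothetical stand-in for mock's dependency installation (see root.log)."""
    raise NotImplementedError

# Repeat until %generate_buildrequires stops discovering new BuildRequires.
while (rc := subprocess.run(CMD).returncode) == 11:
    install_buildrequires()
# rc == 0 means the build can proceed; any other code is a real error.
```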
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2401596-60555/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1707868800
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.Wc8iOT
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
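%prep ends, as in every pass, with the spec's package-specific patch step: the `sed` call comments out each line of setup.py that mentions isort, keeping a lint-only tool out of the build. The same rewrite expressed with Python's `re` module (a re-implementation for illustration; the build itself runs the sed shown above, and the sample line is hypothetical):

```python
import re

line = "    from isort import SortImports"  # hypothetical setup.py line
# \1 keeps the leading indentation, \2 is the matched remainder, now commented out
patched = re.sub(r"^([ \t]*)(.*\bisort\b)", r"\1# \2", line)
print(patched)  # "    # from isort import SortImports"
```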
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.AovH1S
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
creating textdistance.egg-info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
writing manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
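The long Handling block above is the generator walking every `Requires-Dist` entry of the freshly built metadata. Each entry carries an `extra == '...'` environment marker; entries whose extra was requested via `-x` (here test, Jaro, JaroWinkler, Levenshtein) get resolved, and `Requirement not satisfied` schedules them for installation, while entries belonging to any other extra are dismissed as alien. The underlying marker test, shown with the `packaging` library (a simplified view of the check the generator performs):

```python
from packaging.requirements import Requirement

req = Requirement("rapidfuzz >=2.6.0 ; extra == 'jaro'")
print(req.marker.evaluate({"extra": "jaro"}))       # True  -> resolve, maybe install
print(req.marker.evaluate({"extra": "benchmark"}))  # False -> alien, ignore
```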
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
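This second pass again ends in exit status 11: with setuptools and wheel present, the generator could finally read the project metadata, and both invocations discovered new installable requirements (among them rapidfuzz, Levenshtein, jellyfish, hypothesis and numpy), which mock now installs before the third pass. Note that the spec's mixed-case `-x DamerauLevenshtein` matches the metadata's lowercase `dameraulevenshtein`: extra names are compared in normalized form (PEP 685), for example:

```python
from packaging.utils import canonicalize_name

# Extra names as spelled in the spec's -x flags vs. in the generated metadata
print(canonicalize_name("DamerauLevenshtein"))  # dameraulevenshtein
print(canonicalize_name("JaroWinkler"))         # jarowinkler
```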
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.7kL6UU + umask 022 + cd /builddir/build/BUILD + cd textdistance-4.6.1 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein Handling setuptools >= 40.8 from default build backend 
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
creating textdistance.egg-info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
writing manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling requirements from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Ignoring alien requirement: distance ; extra == 'hamming'
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7)
Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7)
Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7)
Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0)
Ignoring alien requirement: jellyfish ; extra == 'all'
Ignoring alien requirement: numpy ; extra == 'all'
Ignoring alien requirement: Levenshtein ; extra == 'all'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Ignoring alien requirement: distance ; extra == 'all'
Ignoring alien requirement: pylev ; extra == 'all'
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Ignoring alien requirement: tabulate ; extra == 'all'
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Ignoring alien requirement: numpy ; extra == 'benchmark'
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Ignoring alien requirement: distance ; extra == 'benchmark'
Ignoring alien requirement: pylev ; extra == 'benchmark'
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Ignoring alien requirement: distance ; extra == 'benchmarks'
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Ignoring alien requirement: jellyfish ; extra == 'common'
Ignoring alien requirement: numpy ; extra == 'common'
Ignoring alien requirement: Levenshtein ; extra == 'common'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common'
Ignoring alien requirement: jellyfish ; extra == 'extra'
Ignoring alien requirement: numpy ; extra == 'extra'
Ignoring alien requirement: Levenshtein ; extra == 'extra'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra'
Ignoring alien requirement: jellyfish ; extra == 'extras'
Ignoring alien requirement: numpy ; extra == 'extras'
Ignoring alien requirement: Levenshtein ; extra == 'extras'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras'
Ignoring alien requirement: twine ; extra == 'lint'
Ignoring alien requirement: mypy ; extra == 'lint'
Ignoring alien requirement: flake8 ; extra == 'lint'
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0)
Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
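The satisfied/alien split above is driven by the PEP 508 environment marker on each Requires-Dist entry: a requirement is only checked when its extra matches one of the extras passed via -x (here test, Jaro, JaroWinkler and Levenshtein, compared after lowercasing); everything else is reported as alien and skipped. A minimal sketch of that decision using the packaging library (which the generator pulls in via python3dist(packaging)); the wanted() helper is ours for illustration, not part of pyproject_buildrequires.py:

# Sketch: is a Requires-Dist entry relevant for a given set of extras?
# Illustrative only; the real script's logic differs in details.
from packaging.requirements import Requirement

def wanted(req_string: str, requested_extras: set[str]) -> bool:
    req = Requirement(req_string)
    if req.marker is None:      # unconditional requirement: always wanted
        return True
    # PEP 508 markers see one 'extra' value at a time, so try each
    # requested extra in turn.
    return any(req.marker.evaluate({"extra": e}) for e in requested_extras)

extras = {e.lower() for e in ("test", "Jaro", "JaroWinkler", "Levenshtein")}
print(wanted("rapidfuzz >=2.6.0 ; extra == 'jaro'", extras))        # True  -> checked
print(wanted("jellyfish ; extra == 'dameraulevenshtein'", extras))  # False -> alien, ignored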
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling requirements from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' (installed: rapidfuzz 2.13.7)
Requirement satisfied: jellyfish ; extra == 'dameraulevenshtein' (installed: jellyfish 0.9.0)
Requirement satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' (installed: pyxDamerauLevenshtein 1.7.1)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Ignoring alien requirement: distance ; extra == 'hamming'
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7)
Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7)
Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7)
Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0)
Ignoring alien requirement: jellyfish ; extra == 'all'
Ignoring alien requirement: numpy ; extra == 'all'
Ignoring alien requirement: Levenshtein ; extra == 'all'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Ignoring alien requirement: distance ; extra == 'all'
Ignoring alien requirement: pylev ; extra == 'all'
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Ignoring alien requirement: tabulate ; extra == 'all'
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Ignoring alien requirement: numpy ; extra == 'benchmark'
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Ignoring alien requirement: distance ; extra == 'benchmark'
Ignoring alien requirement: pylev ; extra == 'benchmark'
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Ignoring alien requirement: distance ; extra == 'benchmarks'
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Requirement satisfied: jellyfish ; extra == 'common' (installed: jellyfish 0.9.0)
Requirement satisfied: numpy ; extra == 'common' (installed: numpy 1.26.0)
Requirement satisfied: Levenshtein ; extra == 'common' (installed: Levenshtein 0.21.0)
Requirement satisfied: pyxDamerauLevenshtein ; extra == 'common' (installed: pyxDamerauLevenshtein 1.7.1)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'common' (installed: rapidfuzz 2.13.7)
Requirement satisfied: jellyfish ; extra == 'extra' (installed: jellyfish 0.9.0)
Requirement satisfied: numpy ; extra == 'extra' (installed: numpy 1.26.0)
Requirement satisfied: Levenshtein ; extra == 'extra' (installed: Levenshtein 0.21.0)
Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extra' (installed: pyxDamerauLevenshtein 1.7.1)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extra' (installed: rapidfuzz 2.13.7)
Requirement satisfied: jellyfish ; extra == 'extras' (installed: jellyfish 0.9.0)
Requirement satisfied: numpy ; extra == 'extras' (installed: numpy 1.26.0)
Requirement satisfied: Levenshtein ; extra == 'extras' (installed: Levenshtein 0.21.0)
Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extras' (installed: pyxDamerauLevenshtein 1.7.1)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extras' (installed: rapidfuzz 2.13.7)
Ignoring alien requirement: twine ; extra == 'lint'
Ignoring alien requirement: mypy ; extra == 'lint'
Ignoring alien requirement: flake8 ; extra == 'lint'
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0)
Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
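The exit status 11 is the signal half of the dynamic BuildRequires protocol: rpmbuild -br stops after %generate_buildrequires and exits 11 so the caller knows the freshly written .buildreqs.nosrc.rpm may name packages not yet present in the chroot. Roughly, the driver loop looks like the sketch below; this is our simplification, not mock's actual code, and the dnf builddep call stands in for mock's own dependency-resolution plumbing:

# Sketch of the retry loop around 'rpmbuild -br' for dynamic BuildRequires.
# Hypothetical simplification of what mock does; paths match this build.
import subprocess

SPEC = "/builddir/build/SPECS/python-textdistance.spec"
BUILDREQS = "/builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm"

while True:
    rc = subprocess.call(
        ["rpmbuild", "-br", "--noclean", "--target", "noarch", "--nodeps", SPEC]
    )
    if rc != 11:
        break  # 0 = all generated BuildRequires satisfied; other codes are real failures
    # Exit code 11 means new BuildRequires were generated and may be missing:
    # install them from the buildreqs package, then re-run the generator.
    subprocess.check_call(["dnf", "-y", "builddep", BUILDREQS])

# Only after the loop settles does the full build proceed, e.g.:
# rpmbuild -ba --noprep --noclean --target noarch --nodeps <spec>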
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2401596-60555/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1707868800
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.ZSgDBm
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.QeSO8F
+ umask 022
+ cd /builddir/build/BUILD
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ cd textdistance-4.6.1
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_wheel.py /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir
Processing /builddir/build/BUILD/textdistance-4.6.1
Preparing metadata (pyproject.toml): started
Running command Preparing metadata (pyproject.toml)
running dist_info
creating /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-oic9m3w7/textdistance.egg-info
writing /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-oic9m3w7/textdistance.egg-info/PKG-INFO
writing dependency_links to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-oic9m3w7/textdistance.egg-info/dependency_links.txt
writing requirements to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-oic9m3w7/textdistance.egg-info/requires.txt
writing top-level names to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-oic9m3w7/textdistance.egg-info/top_level.txt
writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-oic9m3w7/textdistance.egg-info/SOURCES.txt'
reading manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-oic9m3w7/textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-oic9m3w7/textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-oic9m3w7/textdistance-4.6.1.dist-info'
Preparing metadata (pyproject.toml): finished with status 'done'
Building wheels for collected packages: textdistance
Building wheel for textdistance (pyproject.toml): started
Running command Building wheel for textdistance (pyproject.toml)
running bdist_wheel
running build
running build_py
creating build
creating build/lib
creating build/lib/textdistance
copying textdistance/benchmark.py -> build/lib/textdistance
copying textdistance/libraries.py -> build/lib/textdistance
copying textdistance/__init__.py -> build/lib/textdistance
copying textdistance/utils.py -> build/lib/textdistance
creating build/lib/textdistance/algorithms
copying textdistance/algorithms/vector_based.py -> build/lib/textdistance/algorithms
copying textdistance/algorithms/compression_based.py -> build/lib/textdistance/algorithms
copying textdistance/algorithms/simple.py -> build/lib/textdistance/algorithms
copying textdistance/algorithms/sequence_based.py -> build/lib/textdistance/algorithms
copying textdistance/algorithms/base.py -> build/lib/textdistance/algorithms
copying textdistance/algorithms/edit_based.py -> build/lib/textdistance/algorithms
copying textdistance/algorithms/types.py -> build/lib/textdistance/algorithms
copying textdistance/algorithms/__init__.py -> build/lib/textdistance/algorithms
copying textdistance/algorithms/phonetic.py -> build/lib/textdistance/algorithms
copying textdistance/algorithms/token_based.py -> build/lib/textdistance/algorithms
copying textdistance/libraries.json -> build/lib/textdistance
installing to build/bdist.linux-riscv64/wheel
running install
running install_lib
creating build/bdist.linux-riscv64
creating build/bdist.linux-riscv64/wheel
creating build/bdist.linux-riscv64/wheel/textdistance
creating build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/vector_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/compression_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/simple.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/sequence_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/base.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/edit_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/types.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/__init__.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/phonetic.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/algorithms/token_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms
copying build/lib/textdistance/libraries.json -> build/bdist.linux-riscv64/wheel/textdistance
copying build/lib/textdistance/benchmark.py -> build/bdist.linux-riscv64/wheel/textdistance
copying build/lib/textdistance/libraries.py -> build/bdist.linux-riscv64/wheel/textdistance
copying build/lib/textdistance/__init__.py -> build/bdist.linux-riscv64/wheel/textdistance
copying build/lib/textdistance/utils.py -> build/bdist.linux-riscv64/wheel/textdistance
running install_egg_info
running egg_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Copying textdistance.egg-info to build/bdist.linux-riscv64/wheel/textdistance-4.6.1-py3.12.egg-info
running install_scripts
creating build/bdist.linux-riscv64/wheel/textdistance-4.6.1.dist-info/WHEEL
creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-wheel-63i_ld3i/.tmp-cbwoc9me/textdistance-4.6.1-py3-none-any.whl' and adding 'build/bdist.linux-riscv64/wheel' to it
adding 'textdistance/__init__.py'
adding 'textdistance/benchmark.py'
adding 'textdistance/libraries.json'
adding 'textdistance/libraries.py'
adding 'textdistance/utils.py'
adding 'textdistance/algorithms/__init__.py'
adding 'textdistance/algorithms/base.py'
adding 'textdistance/algorithms/compression_based.py'
adding 'textdistance/algorithms/edit_based.py'
adding 'textdistance/algorithms/phonetic.py'
adding 'textdistance/algorithms/sequence_based.py'
adding 'textdistance/algorithms/simple.py'
adding 'textdistance/algorithms/token_based.py'
adding 'textdistance/algorithms/types.py'
adding 'textdistance/algorithms/vector_based.py'
adding 'textdistance-4.6.1.dist-info/LICENSE'
adding 'textdistance-4.6.1.dist-info/METADATA'
adding 'textdistance-4.6.1.dist-info/WHEEL'
adding 'textdistance-4.6.1.dist-info/top_level.txt'
adding 'textdistance-4.6.1.dist-info/RECORD'
removing build/bdist.linux-riscv64/wheel
Building wheel for textdistance (pyproject.toml): finished with status 'done'
Created wheel for textdistance: filename=textdistance-4.6.1-py3-none-any.whl size=31018 sha256=23a231d4866af6735a08a5daa0ed6a3627260fe1120ae8d987eb4c08e987a8bd
Stored in directory: /builddir/.cache/pip/wheels/af/08/72/d6baf94a0831066222f63a4e4a469ea938a661b7e8974b7b68
Successfully built textdistance
+ RPM_EC=0
++ jobs -p
+ exit 0
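The %build stage above drives pip's PEP 517 path through setuptools' bdist_wheel; the build/bdist.linux-riscv64 staging directory simply reflects the builder's architecture, while the resulting wheel is py3-none-any (pure Python). Outside mock the same wheel can be approximated with pip directly; a rough sketch, assuming an unpacked textdistance-4.6.1 source tree (this bypasses the RPM macro and its compiler-flag handling):

cd textdistance-4.6.1
python3 -m pip wheel --no-deps --no-build-isolation --wheel-dir pyproject-wheeldir .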
Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.G81gq4
+ umask 022
+ cd /builddir/build/BUILD
+ '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch '!=' / ']'
+ rm -rf /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch
++ dirname /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch
+ mkdir -p /builddir/build/BUILDROOT
+ mkdir /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ cd textdistance-4.6.1
++ ls /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl
++ xargs basename --multiple
++ sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/'
+ specifier=textdistance==4.6.1
+ '[' -z textdistance==4.6.1 ']'
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ /usr/bin/python3 -m pip install --root /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --prefix /usr --no-deps --disable-pip-version-check --progress-bar off --verbose --ignore-installed --no-warn-script-location --no-index --no-cache-dir --find-links /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir textdistance==4.6.1
Using pip 23.3.2 from /usr/lib/python3.12/site-packages/pip (python 3.12)
Looking in links: /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir
Processing ./pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl
Installing collected packages: textdistance
Successfully installed textdistance-4.6.1
+ '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin ']'
+ rm -f /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo
+ site_dirs=()
+ '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']'
+ site_dirs+=("/usr/lib/python3.12/site-packages")
+ '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages '!=' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']'
+ '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages ']'
+ for site_dir in ${site_dirs[@]}
+ for distinfo in /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch$site_dir/*.dist-info
+ echo '%ghost /usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info'
+ sed -i s/pip/rpm/ /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/INSTALLER
+ PYTHONPATH=/usr/lib/rpm/redhat
+ /usr/bin/python3 -B /usr/lib/rpm/redhat/pyproject_preprocess_record.py --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --record /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record
+ rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD
removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD'
+ rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED
removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED'
++ wc -l /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo
++ cut -f1 '-d '
+ lines=1
+ '[' 1 -ne 1 ']'
+ RPM_PERCENTAGES_COUNT=2
+ /usr/bin/python3 /usr/lib/rpm/redhat/pyproject_save_files.py --output-files /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-files --output-modules /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-modules --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --sitelib /usr/lib/python3.12/site-packages --sitearch /usr/lib64/python3.12/site-packages --python-version 3.12 --pyproject-record /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record --prefix /usr -l textdistance
+ /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 4.6.1-1.fc40 --unique-debug-suffix -4.6.1-1.fc40.noarch --unique-debug-src-base python-textdistance-4.6.1-1.fc40.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/textdistance-4.6.1
find-debuginfo: starting
Extracting debug info from 0 files
Creating .debug symlinks for symlinks to ELF files
find: ‘debug’: No such file or directory
find-debuginfo: done
+ /usr/lib/rpm/check-buildroot
+ /usr/lib/rpm/redhat/brp-ldconfig
+ /usr/lib/rpm/brp-compress
+ /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip
+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip
+ /usr/lib/rpm/check-rpaths
+ /usr/lib/rpm/redhat/brp-mangle-shebangs
+ /usr/lib/rpm/brp-remove-la-files
+ env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8
Bytecompiling .py files below /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12 using python3.12
+ /usr/lib/rpm/redhat/brp-python-hardlink
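One detail worth noting from the %install trace above: the pip install specifier is recovered from the wheel filename with a sed rewrite. Run on its own, the expression behaves like this (hypothetical interactive session):

$ echo textdistance-4.6.1-py3-none-any.whl | sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/'
textdistance==4.6.1

The dist-info directory is then recorded as %ghost, its INSTALLER is rewritten from pip to rpm, and RECORD/REQUESTED are removed, so that RPM rather than pip owns the installed file list.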
Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.bOIX4x
+ umask 022
+ cd /builddir/build/BUILD
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ cd textdistance-4.6.1
+ k='not test_compare[Hamming]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein]'
+ k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ PATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin
+ PYTHONPATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages:/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages
+ PYTHONDONTWRITEBYTECODE=1
+ PYTEST_ADDOPTS=' --ignore=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir'
+ PYTEST_XDIST_AUTO_NUM_WORKERS=8
+ /usr/bin/pytest -v -k 'not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]' -n auto
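The -k expression assembled above deselects the tests/test_external.py comparisons for the Hamming, Levenshtein and DamerauLevenshtein wrappers before running the remaining suite on 8 xdist workers. Inside a spec, such an expression is usually built one clause at a time, as the trace suggests; a minimal sketch, assuming the common shell idiom (the real spec text may differ):

k='not test_compare[Hamming]'
k="$k and not test_compare[Levenshtein]"
# ...one more clause per deselected parametrisation...
%pytest -v -k "$k" -n auto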
============================= test session starts ==============================
platform linux -- Python 3.12.0, pytest-7.3.2, pluggy-1.3.0 -- /usr/bin/python3
cachedir: .pytest_cache
hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/builddir/build/BUILD/textdistance-4.6.1/.hypothesis/examples')
rootdir: /builddir/build/BUILD/textdistance-4.6.1
configfile: setup.cfg
plugins: hypothesis-6.82.0, xdist-3.5.0
created: 8/8 workers
8 workers [412 items]
scheduling tests via LoadScheduling
tests/test_common.py::test_normalization_range[alg0]
tests/test_common.py::test_normalization_by_one[alg12]
tests/test_common.py::test_normalization_range[alg12]
tests/test_common.py::test_normalization_same[alg12]
tests/test_common.py::test_normalization_monotonic[alg0]
tests/test_common.py::test_normalization_same[alg0]
tests/test_common.py::test_normalization_by_one[alg0]
tests/test_common.py::test_normalization_monotonic[alg12]
[gw2] [ 0%] FAILED tests/test_common.py::test_normalization_by_one[alg0]
tests/test_common.py::test_normalization_by_one[alg1]
[gw3] [ 0%] FAILED tests/test_common.py::test_normalization_by_one[alg12]
tests/test_common.py::test_normalization_by_one[alg13]
[gw5] [ 0%] PASSED tests/test_common.py::test_normalization_same[alg12]
tests/test_common.py::test_normalization_same[alg13]
[gw0] [ 0%] FAILED tests/test_common.py::test_normalization_range[alg0]
tests/test_common.py::test_normalization_range[alg1]
[gw6] [ 1%] PASSED tests/test_common.py::test_normalization_monotonic[alg0]
tests/test_common.py::test_normalization_monotonic[alg1]
[gw7] [ 1%] PASSED tests/test_common.py::test_normalization_monotonic[alg12]
tests/test_common.py::test_normalization_monotonic[alg13]
[gw2] [ 1%] FAILED tests/test_common.py::test_normalization_by_one[alg1]
tests/test_common.py::test_normalization_by_one[alg2]
[gw4] [ 1%] FAILED tests/test_common.py::test_normalization_same[alg0]
[gw3] [ 2%] FAILED tests/test_common.py::test_normalization_by_one[alg13]
tests/test_common.py::test_normalization_same[alg1]
tests/test_common.py::test_normalization_by_one[alg14]
[gw1] [ 2%] FAILED tests/test_common.py::test_normalization_range[alg12]
tests/test_common.py::test_normalization_range[alg13]
[gw0] [ 2%] FAILED tests/test_common.py::test_normalization_range[alg1]
tests/test_common.py::test_normalization_range[alg2]
[gw7] [ 2%] PASSED tests/test_common.py::test_normalization_monotonic[alg13]
tests/test_common.py::test_normalization_monotonic[alg14]
[gw2] [ 3%] PASSED tests/test_common.py::test_normalization_by_one[alg2]
tests/test_common.py::test_normalization_by_one[alg3]
[gw5] [ 3%] FAILED tests/test_common.py::test_normalization_same[alg13]
tests/test_common.py::test_normalization_same[alg14]
[gw4] [ 3%] PASSED tests/test_common.py::test_normalization_same[alg1]
tests/test_common.py::test_normalization_same[alg2]
[gw3] [ 3%] FAILED tests/test_common.py::test_normalization_by_one[alg14]
tests/test_common.py::test_normalization_by_one[alg15]
[gw6] [ 4%] PASSED tests/test_common.py::test_normalization_monotonic[alg1]
tests/test_common.py::test_normalization_monotonic[alg2]
[gw5] [ 4%] PASSED tests/test_common.py::test_normalization_same[alg14]
tests/test_common.py::test_normalization_same[alg15]
[gw7] [ 4%] PASSED tests/test_common.py::test_normalization_monotonic[alg14]
tests/test_common.py::test_normalization_monotonic[alg15]
[gw1] [ 4%] FAILED tests/test_common.py::test_normalization_range[alg13]
tests/test_common.py::test_normalization_range[alg14]
[gw0] [ 5%] FAILED tests/test_common.py::test_normalization_range[alg2]
[gw2] [ 5%] FAILED tests/test_common.py::test_normalization_by_one[alg3]
tests/test_common.py::test_normalization_range[alg3]
tests/test_common.py::test_normalization_by_one[alg4]
[gw3] [ 5%] PASSED tests/test_common.py::test_normalization_by_one[alg15]
tests/test_common.py::test_normalization_by_one[alg16]
[gw6] [ 5%] PASSED tests/test_common.py::test_normalization_monotonic[alg2]
tests/test_common.py::test_normalization_monotonic[alg3]
[gw4] [ 6%] FAILED tests/test_common.py::test_normalization_same[alg2]
tests/test_common.py::test_normalization_same[alg3]
[gw2] [ 6%] PASSED tests/test_common.py::test_normalization_by_one[alg4]
tests/test_common.py::test_normalization_by_one[alg5]
[gw5] [ 6%] FAILED tests/test_common.py::test_normalization_same[alg15]
tests/test_common.py::test_normalization_same[alg16]
[gw0] [ 6%] PASSED tests/test_common.py::test_normalization_range[alg3]
tests/test_common.py::test_normalization_range[alg4]
[gw7] [ 7%] PASSED tests/test_common.py::test_normalization_monotonic[alg15]
tests/test_common.py::test_normalization_monotonic[alg16]
[gw4] [ 7%] PASSED tests/test_common.py::test_normalization_same[alg3]
tests/test_common.py::test_normalization_same[alg4]
[gw3] [ 7%] FAILED tests/test_common.py::test_normalization_by_one[alg16]
tests/test_common.py::test_normalization_by_one[alg17]
[gw1] [ 7%] FAILED tests/test_common.py::test_normalization_range[alg14]
tests/test_common.py::test_normalization_range[alg15]
[gw6] [ 8%] PASSED tests/test_common.py::test_normalization_monotonic[alg3]
tests/test_common.py::test_normalization_monotonic[alg4]
[gw4] [ 8%] FAILED tests/test_common.py::test_normalization_same[alg4]
tests/test_common.py::test_normalization_same[alg5]
[gw1] [ 8%] FAILED tests/test_common.py::test_normalization_range[alg15]
tests/test_common.py::test_normalization_range[alg16]
[gw0] [ 8%] PASSED tests/test_common.py::test_normalization_range[alg4]
tests/test_common.py::test_normalization_range[alg5]
[gw3] [ 8%] PASSED tests/test_common.py::test_normalization_by_one[alg17]
tests/test_common.py::test_normalization_by_one[alg18]
[gw5] [ 9%] FAILED tests/test_common.py::test_normalization_same[alg16]
tests/test_common.py::test_normalization_same[alg17]
[gw4] [ 9%] PASSED tests/test_common.py::test_normalization_same[alg5]
tests/test_common.py::test_normalization_same[alg6]
[gw2] [ 9%] FAILED tests/test_common.py::test_normalization_by_one[alg5]
tests/test_common.py::test_normalization_by_one[alg6]
[gw6] [ 9%] PASSED tests/test_common.py::test_normalization_monotonic[alg4]
tests/test_common.py::test_normalization_monotonic[alg5]
[gw7] [ 10%] PASSED tests/test_common.py::test_normalization_monotonic[alg16]
tests/test_common.py::test_normalization_monotonic[alg17]
[gw0] [ 10%] FAILED tests/test_common.py::test_normalization_range[alg5]
tests/test_common.py::test_normalization_range[alg6]
[gw1] [ 10%] FAILED tests/test_common.py::test_normalization_range[alg16]
tests/test_common.py::test_normalization_range[alg17]
[gw5] [ 10%] FAILED tests/test_common.py::test_normalization_same[alg17]
tests/test_common.py::test_normalization_same[alg18]
[gw4] [ 11%] PASSED tests/test_common.py::test_normalization_same[alg6]
tests/test_common.py::test_normalization_same[alg7]
[gw4] [ 11%] FAILED tests/test_common.py::test_normalization_same[alg7]
tests/test_common.py::test_normalization_same[alg8]
[gw3] [ 11%] FAILED tests/test_common.py::test_normalization_by_one[alg18]
tests/test_common.py::test_normalization_by_one[alg19]
[gw6] [ 11%] PASSED tests/test_common.py::test_normalization_monotonic[alg5]
[gw5] [ 12%] FAILED tests/test_common.py::test_normalization_same[alg18]
tests/test_common.py::test_normalization_same[alg19]
tests/test_common.py::test_normalization_monotonic[alg6]
[gw7] [ 12%] PASSED tests/test_common.py::test_normalization_monotonic[alg17]
[gw2] [ 12%] FAILED tests/test_common.py::test_normalization_by_one[alg6]
[gw0] [ 12%] PASSED tests/test_common.py::test_normalization_range[alg6]
tests/test_common.py::test_normalization_monotonic[alg18]
tests/test_common.py::test_normalization_range[alg7]
tests/test_common.py::test_normalization_by_one[alg7]
[gw4] [ 13%] FAILED tests/test_common.py::test_normalization_same[alg8]
tests/test_common.py::test_normalization_same[alg9]
[gw1] [ 13%] FAILED tests/test_common.py::test_normalization_range[alg17]
tests/test_common.py::test_normalization_range[alg18]
[gw2] [ 13%] FAILED tests/test_common.py::test_normalization_by_one[alg7]
tests/test_common.py::test_normalization_by_one[alg8]
[gw4] [ 13%] PASSED tests/test_common.py::test_normalization_same[alg9]
tests/test_common.py::test_normalization_same[alg10]
[gw0] [ 14%] FAILED tests/test_common.py::test_normalization_range[alg7]
tests/test_common.py::test_normalization_range[alg8]
[gw6] [ 14%] PASSED tests/test_common.py::test_normalization_monotonic[alg6]
tests/test_common.py::test_normalization_monotonic[alg7]
[gw7] [ 14%] PASSED tests/test_common.py::test_normalization_monotonic[alg18]
tests/test_common.py::test_normalization_monotonic[alg19]
[gw1] [ 14%] FAILED tests/test_common.py::test_normalization_range[alg18]
tests/test_common.py::test_normalization_range[alg19]
[gw3] [ 15%] FAILED tests/test_common.py::test_normalization_by_one[alg19]
tests/test_common.py::test_normalization_by_one[alg20]
[gw5] [ 15%] FAILED tests/test_common.py::test_normalization_same[alg19]
tests/test_common.py::test_normalization_same[alg20]
[gw2] [ 15%] PASSED tests/test_common.py::test_normalization_by_one[alg8]
tests/test_common.py::test_normalization_by_one[alg9]
[gw4] [ 15%] PASSED tests/test_common.py::test_normalization_same[alg10]
tests/test_common.py::test_normalization_same[alg11]
[gw4] [ 16%] FAILED tests/test_common.py::test_normalization_same[alg11]
tests/test_common.py::test_no_common_chars[alg0]
[gw4] [ 16%] PASSED tests/test_common.py::test_no_common_chars[alg0]
tests/test_common.py::test_no_common_chars[alg1]
[gw4] [ 16%] PASSED tests/test_common.py::test_no_common_chars[alg1]
tests/test_common.py::test_no_common_chars[alg2]
[gw5] [ 16%] PASSED tests/test_common.py::test_normalization_same[alg20]
[gw4] [ 16%] PASSED tests/test_common.py::test_no_common_chars[alg2]
tests/test_common.py::test_no_common_chars[alg3]
tests/test_common.py::test_normalization_same[alg21]
[gw4] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg3]
tests/test_common.py::test_no_common_chars[alg4]
[gw4] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg4]
tests/test_common.py::test_no_common_chars[alg5]
[gw1] [ 17%] PASSED tests/test_common.py::test_normalization_range[alg19]
[gw7] [ 17%] PASSED tests/test_common.py::test_normalization_monotonic[alg19]
[gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg5]
tests/test_common.py::test_normalization_monotonic[alg20]
tests/test_common.py::test_normalization_range[alg20]
tests/test_common.py::test_no_common_chars[alg6]
[gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg6]
tests/test_common.py::test_no_common_chars[alg7]
[gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg7]
tests/test_common.py::test_no_common_chars[alg8]
[gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg8]
tests/test_common.py::test_no_common_chars[alg9]
[gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg9]
tests/test_common.py::test_no_common_chars[alg10]
[gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg10]
tests/test_common.py::test_no_common_chars[alg11]
[gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg11]
tests/test_common.py::test_no_common_chars[alg12]
[gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg12]
tests/test_common.py::test_no_common_chars[alg13]
[gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg13]
tests/test_common.py::test_no_common_chars[alg14]
[gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg14]
tests/test_common.py::test_no_common_chars[alg15]
[gw3] [ 20%] PASSED tests/test_common.py::test_normalization_by_one[alg20]
[gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg15]
tests/test_common.py::test_normalization_by_one[alg21]
tests/test_common.py::test_no_common_chars[alg16]
[gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg16]
tests/test_common.py::test_no_common_chars[alg17]
[gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg17]
tests/test_common.py::test_no_common_chars[alg18]
[gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg18]
tests/test_common.py::test_no_common_chars[alg19]
[gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg19]
tests/test_common.py::test_no_common_chars[alg20]
[gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg20]
tests/test_common.py::test_no_common_chars[alg21]
[gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg21]
tests/test_common.py::test_no_common_chars[alg22]
[gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg22]
tests/test_common.py::test_no_common_chars[alg23]
[gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg23]
tests/test_common.py::test_empty[alg0]
[gw4] [ 23%] PASSED tests/test_common.py::test_empty[alg0]
tests/test_common.py::test_empty[alg1]
[gw4] [ 23%] PASSED tests/test_common.py::test_empty[alg1]
tests/test_common.py::test_empty[alg2]
[gw4] [ 23%] PASSED tests/test_common.py::test_empty[alg2]
tests/test_common.py::test_empty[alg3]
[gw6] [ 23%] PASSED tests/test_common.py::test_normalization_monotonic[alg7]
tests/test_common.py::test_normalization_monotonic[alg8]
[gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg3]
tests/test_common.py::test_empty[alg4]
[gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg4]
tests/test_common.py::test_empty[alg5]
[gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg5]
tests/test_common.py::test_empty[alg6]
[gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg6]
tests/test_common.py::test_empty[alg7]
[gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg7]
tests/test_common.py::test_empty[alg8]
[gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg8]
tests/test_common.py::test_empty[alg9]
[gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg9]
tests/test_common.py::test_empty[alg10]
[gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg10]
tests/test_common.py::test_empty[alg11]
[gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg11]
tests/test_common.py::test_empty[alg12]
[gw4] [ 26%] PASSED tests/test_common.py::test_empty[alg12]
tests/test_common.py::test_empty[alg13]
[gw4] [ 26%] PASSED tests/test_common.py::test_empty[alg13]
tests/test_common.py::test_empty[alg14]
[gw4] [ 26%] PASSED tests/test_common.py::test_empty[alg14]
tests/test_common.py::test_empty[alg15]
[gw4] [ 26%] PASSED tests/test_common.py::test_empty[alg15]
tests/test_common.py::test_empty[alg16]
[gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg16]
tests/test_common.py::test_empty[alg17]
[gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg17]
tests/test_common.py::test_empty[alg18]
[gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg18]
tests/test_common.py::test_empty[alg19]
[gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg19]
tests/test_common.py::test_empty[alg20]
[gw4] [ 28%] PASSED tests/test_common.py::test_empty[alg20]
tests/test_common.py::test_empty[alg21]
[gw4] [ 28%] PASSED tests/test_common.py::test_empty[alg21]
tests/test_common.py::test_empty[alg22]
[gw4] [ 28%] PASSED tests/test_common.py::test_empty[alg22]
tests/test_common.py::test_empty[alg23]
[gw4] [ 28%] PASSED tests/test_common.py::test_empty[alg23]
tests/test_common.py::test_unequal_distance[alg0]
[gw4] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg0]
tests/test_common.py::test_unequal_distance[alg1]
[gw4] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg1]
tests/test_common.py::test_unequal_distance[alg2]
[gw4] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg2]
tests/test_common.py::test_unequal_distance[alg3]
[gw4] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg3]
tests/test_common.py::test_unequal_distance[alg4]
[gw4] [ 30%] PASSED tests/test_common.py::test_unequal_distance[alg4]
[gw2] [ 30%] PASSED tests/test_common.py::test_normalization_by_one[alg9]
tests/test_common.py::test_normalization_by_one[alg10]
tests/test_common.py::test_unequal_distance[alg5]
[gw4] [ 30%] PASSED tests/test_common.py::test_unequal_distance[alg5]
tests/test_common.py::test_unequal_distance[alg6]
[gw4] [ 30%] PASSED tests/test_common.py::test_unequal_distance[alg6]
tests/test_common.py::test_unequal_distance[alg7]
[gw4] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg7]
tests/test_common.py::test_unequal_distance[alg8]
[gw4] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg8]
tests/test_common.py::test_unequal_distance[alg9]
[gw4] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg9]
tests/test_common.py::test_unequal_distance[alg10]
[gw4] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg10]
tests/test_common.py::test_unequal_distance[alg11]
[gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg11]
tests/test_common.py::test_unequal_distance[alg12]
[gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg12]
tests/test_common.py::test_unequal_distance[alg13]
[gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg13]
[gw3] [ 32%] FAILED tests/test_common.py::test_normalization_by_one[alg21]
tests/test_common.py::test_unequal_distance[alg14]
[gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg14]
tests/test_common.py::test_unequal_distance[alg15]
tests/test_common.py::test_normalization_by_one[alg22]
[gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg15]
tests/test_common.py::test_unequal_distance[alg16]
[gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg16]
tests/test_common.py::test_unequal_distance[alg17]
[gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg17]
tests/test_common.py::test_unequal_distance[alg18]
[gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg18]
tests/test_common.py::test_unequal_distance[alg19]
[gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg19]
tests/test_common.py::test_unequal_distance[alg20]
[gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg20]
tests/test_common.py::test_unequal_distance[alg21]
[gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg21]
tests/test_common.py::test_unequal_distance[alg22]
[gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg22]
tests/test_common.py::test_unequal_distance[alg23]
[gw4] [ 35%] PASSED tests/test_common.py::test_unequal_distance[alg23]
tests/test_external.py::test_compare[Jaro]
[gw1] [ 35%] PASSED tests/test_common.py::test_normalization_range[alg20]
tests/test_common.py::test_normalization_range[alg21]
[gw7] [ 35%] PASSED tests/test_common.py::test_normalization_monotonic[alg20]
tests/test_common.py::test_normalization_monotonic[alg21]
[gw6] [ 35%] PASSED tests/test_common.py::test_normalization_monotonic[alg8]
tests/test_common.py::test_normalization_monotonic[alg9]
[gw2] [ 36%] PASSED tests/test_common.py::test_normalization_by_one[alg10]
tests/test_common.py::test_normalization_by_one[alg11]
[gw4] [ 36%] PASSED tests/test_external.py::test_compare[Jaro]
tests/test_external.py::test_compare[JaroWinkler]
[gw5] [ 36%] FAILED tests/test_common.py::test_normalization_same[alg21]
tests/test_common.py::test_normalization_same[alg22]
[gw7] [ 36%] PASSED tests/test_common.py::test_normalization_monotonic[alg21]
tests/test_common.py::test_normalization_monotonic[alg22]
[gw3] [ 37%] FAILED tests/test_common.py::test_normalization_by_one[alg22]
tests/test_common.py::test_normalization_by_one[alg23]
[gw6] [ 37%] PASSED tests/test_common.py::test_normalization_monotonic[alg9]
tests/test_common.py::test_normalization_monotonic[alg10]
[gw1] [ 37%] FAILED tests/test_common.py::test_normalization_range[alg21]
tests/test_common.py::test_normalization_range[alg22]
[gw2] [ 37%] FAILED tests/test_common.py::test_normalization_by_one[alg11]
tests/test_external.py::test_list_of_numbers[Jaro]
[gw3] [ 38%] FAILED tests/test_common.py::test_normalization_by_one[alg23]
tests/test_compression/test_common.py::test_monotonicity[alg3]
[gw5] [ 38%] FAILED tests/test_common.py::test_normalization_same[alg22]
tests/test_common.py::test_normalization_same[alg23]
[gw3] [ 38%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg3]
tests/test_compression/test_common.py::test_monotonicity[alg4]
[gw3] [ 38%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg4]
tests/test_compression/test_common.py::test_monotonicity[alg5]
[gw3] [ 39%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg5]
tests/test_compression/test_common.py::test_monotonicity[alg6]
[gw3] [ 39%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg6]
tests/test_compression/test_common.py::test_simmetry[alg0]
[gw1] [ 39%] FAILED tests/test_common.py::test_normalization_range[alg22]
[gw4] [ 39%] PASSED tests/test_external.py::test_compare[JaroWinkler]
tests/test_external.py::test_qval[None-Jaro]
tests/test_common.py::test_normalization_range[alg23]
[gw7] [ 40%] PASSED tests/test_common.py::test_normalization_monotonic[alg22]
tests/test_common.py::test_normalization_monotonic[alg23]
[gw6] [ 40%] PASSED tests/test_common.py::test_normalization_monotonic[alg10]
tests/test_common.py::test_normalization_monotonic[alg11]
[gw4] [ 40%] FAILED tests/test_external.py::test_qval[None-Jaro]
tests/test_external.py::test_qval[None-JaroWinkler]
[gw5] [ 40%] PASSED tests/test_common.py::test_normalization_same[alg23]
tests/test_compression/test_common.py::test_is_normalized[alg1]
[gw4] [ 41%] FAILED tests/test_external.py::test_qval[None-JaroWinkler]
[gw5] [ 41%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg1]
tests/test_compression/test_common.py::test_is_normalized[alg2]
tests/test_external.py::test_qval[1-Jaro]
[gw5] [ 41%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg2]
tests/test_compression/test_common.py::test_is_normalized[alg3]
[gw4] [ 41%] FAILED tests/test_external.py::test_qval[1-Jaro]
tests/test_external.py::test_qval[1-JaroWinkler]
[gw2] [ 41%] FAILED tests/test_external.py::test_list_of_numbers[Jaro]
tests/test_external.py::test_list_of_numbers[JaroWinkler]
[gw3] [ 42%] FAILED tests/test_compression/test_common.py::test_simmetry[alg0]
tests/test_compression/test_common.py::test_simmetry[alg1]
[gw6] [ 42%] PASSED tests/test_common.py::test_normalization_monotonic[alg11]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1]
[gw6] [ 42%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1] [gw6] [ 42%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3] [gw6] [ 43%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7] [gw6] [ 43%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2] [gw6] [ 43%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1] [gw6] [ 43%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3] [gw6] [ 44%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1] [gw6] [ 44%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2] [gw6] [ 44%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3] [gw6] [ 44%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4] [gw6] [ 45%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4] [gw7] [ 45%] PASSED tests/test_common.py::test_normalization_monotonic[alg23] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1] [gw6] [ 45%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1] tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1] [gw6] [ 45%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3] 
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1] [gw2] [ 48%] PASSED tests/test_external.py::test_list_of_numbers[JaroWinkler] tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] [gw2] [ 49%] PASSED tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2] tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] [gw2] [ 49%] PASSED tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2] tests/test_compression/test_arith_ncd.py::test_make_probs [gw2] [ 50%] PASSED tests/test_compression/test_arith_ncd.py::test_make_probs tests/test_compression/test_arith_ncd.py::test_arith_output [gw2] [ 50%] PASSED tests/test_compression/test_arith_ncd.py::test_arith_output tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3] [gw2] [ 50%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] tests/test_edit/test_editex.py::test_distance[--0] [gw6] [ 50%] PASSED tests/test_edit/test_editex.py::test_distance[--0] tests/test_edit/test_editex.py::test_distance[nelson--12] tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] [gw2] [ 51%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] [gw6] [ 51%] PASSED tests/test_edit/test_editex.py::test_distance[nelson--12] tests/test_edit/test_editex.py::test_distance[-neilsen-14] tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] [gw6] [ 51%] PASSED tests/test_edit/test_editex.py::test_distance[-neilsen-14] tests/test_edit/test_editex.py::test_distance[ab-a-2] [gw3] [ 51%] FAILED tests/test_compression/test_common.py::test_simmetry[alg1] [gw2] [ 52%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[ab-a-2] tests/test_edit/test_editex.py::test_distance[ab-c-4] [gw2] [ 52%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] 
tests/test_compression/test_common.py::test_simmetry[alg2]
tests/test_compression/test_common.py::test_monotonicity[alg0]
[gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[ab-c-4]
[gw2] [ 53%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg0]
tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1]
[gw6] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1]
tests/test_compression/test_common.py::test_monotonicity[alg1]
tests/test_edit/test_editex.py::test_distance[-MARTHA-12]
[gw6] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance[-MARTHA-12]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12]
[gw2] [ 53%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg1]
[gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24]
[gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24]
tests/test_compression/test_common.py::test_monotonicity[alg2]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3]
[gw2] [ 54%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg2]
[gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3]
tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4]
[gw6] [ 55%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4]
[gw2] [ 55%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5]
tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1]
[gw2] [ 55%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5]
tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
[gw6] [ 55%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5]
tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0]
[gw2] [ 56%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
tests/test_edit/test_hamming.py::test_distance[test-text-1]
[gw6] [ 56%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0]
[gw2] [ 56%] PASSED tests/test_edit/test_hamming.py::test_distance[test-text-1]
[gw5] [ 56%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg3]
tests/test_edit/test_hamming.py::test_distance[test-tset-2]
[gw2] [ 57%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tset-2]
tests/test_compression/test_common.py::test_is_normalized[alg4]
tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0]
tests/test_edit/test_hamming.py::test_distance[test-qwe-4]
[gw2] [ 57%] PASSED tests/test_edit/test_hamming.py::test_distance[test-qwe-4]
[gw6] [ 57%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0]
tests/test_edit/test_hamming.py::test_distance[test-tesst-2]
[gw6] [ 57%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tesst-2]
tests/test_edit/test_hamming.py::test_distance[test-testit-2]
tests/test_edit/test_hamming.py::test_distance[test-tet-2]
[gw6] [ 58%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tet-2]
[gw2] [ 58%] PASSED tests/test_edit/test_hamming.py::test_distance[test-testit-2]
tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334]
tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222]
[gw6] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334]
tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0]
[gw6] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0]
[gw2] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222]
tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666]
tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666]
[gw6] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666]
[gw2] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666]
tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683]
tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334]
[gw6] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334]
[gw2] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683]
tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444]
tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665]
[gw2] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665]
[gw6] [ 60%] PASSED tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444]
tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0]
[gw2] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0]
tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
[gw6] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925]
tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84]
[gw6] [ 61%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84]
[gw2] [ 61%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925]
tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332]
tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4]
[gw6] [ 61%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332]
tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272]
[gw6] [ 61%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272]
tests/test_edit/test_levenshtein.py::test_distance[test-text-1]
[gw2] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4]
[gw6] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-text-1]
tests/test_edit/test_levenshtein.py::test_distance[test-testit-2]
[gw2] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-testit-2]
tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1]
tests/test_edit/test_levenshtein.py::test_distance[test-tset-2]
[gw6] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tset-2]
tests/test_edit/test_matrix.py::test_distance[-a-0]
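The Jaro and Jaro-Winkler expectations above are plain arithmetic. For MARTHA/MARHTA there are m = 6 matching characters and t = 1 transposition, so Jaro gives (6/6 + 6/6 + (6-1)/6)/3 = 0.944444..., and the Winkler prefix bonus with common prefix l = 3 and p = 0.1 lifts it to 0.944... + 3*0.1*(1 - 0.944...) = 0.961111..., matching the parametrized values. A short check against the installed package (jaro and jaro_winkler are the instances textdistance ships):

    import textdistance

    # Jaro: (1/3) * (m/|s1| + m/|s2| + (m - t)/m) with m=6, t=1
    assert abs(textdistance.jaro('MARTHA', 'MARHTA') - 0.944444444) < 1e-8
    # Jaro-Winkler: jaro + l * p * (1 - jaro) with l=3, p=0.1
    assert abs(textdistance.jaro_winkler('MARTHA', 'MARHTA') - 0.9611111111111111) < 1e-8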
[gw2] [ 63%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1]
tests/test_edit/test_levenshtein.py::test_distance[test-tet-1]
[gw6] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[-a-0]
[gw2] [ 63%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tet-1]
tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1]
[gw6] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1]
tests/test_edit/test_matrix.py::test_distance[A-C--3]
tests/test_edit/test_matrix.py::test_distance[--1]
[gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[A-C--3]
tests/test_edit/test_matrix.py::test_distance[G-G-7]
[gw2] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[--1]
[gw4] [ 64%] FAILED tests/test_external.py::test_qval[1-JaroWinkler]
[gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[G-G-7]
tests/test_edit/test_matrix.py::test_distance[T-A--4]
tests/test_external.py::test_qval[2-Jaro]
[gw2] [ 65%] PASSED tests/test_edit/test_matrix.py::test_distance[T-A--4]
tests/test_edit/test_matrix.py::test_distance[A-A-10]
tests/test_edit/test_matrix.py::test_distance[T-C-0]
[gw6] [ 65%] PASSED tests/test_edit/test_matrix.py::test_distance[A-A-10]
[gw2] [ 65%] PASSED tests/test_edit/test_matrix.py::test_distance[T-C-0]
tests/test_edit/test_mlipns.py::test_distance[a--0]
tests/test_edit/test_matrix.py::test_distance[A-G--1]
[gw6] [ 65%] PASSED tests/test_edit/test_mlipns.py::test_distance[a--0]
[gw7] [ 66%] PASSED tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor
[gw2] [ 66%] PASSED tests/test_edit/test_matrix.py::test_distance[A-G--1]
tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor
tests/test_edit/test_mlipns.py::test_distance[-a-0]
tests/test_edit/test_matrix.py::test_distance[C-T-0]
[gw6] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[-a-0]
[gw2] [ 66%] PASSED tests/test_edit/test_matrix.py::test_distance[C-T-0]
tests/test_edit/test_mlipns.py::test_distance[a-a-1]
[gw6] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[a-a-1]
tests/test_edit/test_mlipns.py::test_distance[--1]
tests/test_edit/test_mlipns.py::test_distance[ab-a-1]
[gw2] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[--1]
[gw6] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[ab-a-1]
tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1]
tests/test_edit/test_mlipns.py::test_distance[abc-abc-1]
[gw6] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1]
[gw2] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abc-1]
tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1]
[gw6] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1]
tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1]
[gw2] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1]
tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16]
tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1]
[gw6] [ 68%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16]
[gw2] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0]
tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0]
[gw6] [ 69%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5]
[gw6] [ 69%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5]
[gw5] [ 69%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg4]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7]
[gw2] [ 69%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0]
tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0]
[gw6] [ 70%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
[gw2] [ 70%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0]
tests/test_compression/test_common.py::test_is_normalized[alg5]
tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0]
[gw6] [ 70%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
[gw2] [ 70%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0]
tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1]
tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26]
[gw6] [ 71%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26]
[gw2] [ 71%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1]
tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0]
tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333]
[gw2] [ 71%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333]
tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666]
[gw6] [ 71%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0]
tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
[gw2] [ 72%] PASSED tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666]
[gw6] [ 72%] PASSED tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
tests/test_phonetic/test_editex.py::test_distance[--0]
[gw2] [ 72%] PASSED tests/test_phonetic/test_editex.py::test_distance[--0]
tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873]
tests/test_phonetic/test_editex.py::test_distance[nelson--12]
[gw6] [ 72%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873]
tests/test_phonetic/test_editex.py::test_distance[ab-c-4]
[gw2] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson--12]
[gw6] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-c-4]
tests/test_phonetic/test_editex.py::test_distance[-neilsen-14]
tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2]
[gw2] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[-neilsen-14]
tests/test_phonetic/test_editex.py::test_distance[ab-a-2]
[gw6] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2]
[gw2] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-a-2]
tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2]
tests/test_phonetic/test_editex.py::test_distance[niall-neal-1]
[gw2] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-neal-1]
tests/test_phonetic/test_editex.py::test_distance[neal-niall-1]
[gw6] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2]
[gw2] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-niall-1]
tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3]
tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2]
[gw2] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3]
[gw6] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2]
tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3]
tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2]
[gw2] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3]
tests/test_phonetic/test_editex.py::test_distance[cat-hat-2]
[gw6] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2]
tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12]
[gw2] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[cat-hat-2]
tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2]
[gw6] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12]
tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6]
[gw2] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2]
tests/test_phonetic/test_editex.py::test_local[--0]
[gw6] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6]
[gw2] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_local[--0]
tests/test_phonetic/test_editex.py::test_local[-neilsen-14]
tests/test_phonetic/test_editex.py::test_local[nelson--12]
[gw6] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_local[-neilsen-14]
[gw2] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson--12]
tests/test_phonetic/test_editex.py::test_local[ab-a-2]
tests/test_phonetic/test_editex.py::test_local[ab-c-2]
[gw2] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-c-2]
[gw6] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-a-2]
tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2]
tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2]
[gw2] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2]
[gw6] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2]
tests/test_phonetic/test_editex.py::test_local[neal-niall-1]
tests/test_phonetic/test_editex.py::test_local[niall-neal-1]
[gw2] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-neal-1]
[gw6] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-niall-1]
tests/test_phonetic/test_editex.py::test_local[niall-nihal-2]
tests/test_phonetic/test_editex.py::test_local[nihal-niall-2]
[gw2] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[nihal-niall-2]
tests/test_phonetic/test_editex.py::test_local[neal-nihl-3]
[gw6] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-nihal-2]
tests/test_phonetic/test_editex.py::test_local[nihl-neal-3]
[gw2] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-nihl-3]
tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-]
[gw6] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[nihl-neal-3]
[gw2] [ 80%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-]
tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet]
tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd]
[gw2] [ 80%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet]
[gw6] [ 80%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd]
tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest]
tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION]
[gw6] [ 80%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION]
[gw2] [ 81%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest]
tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia]
[gw2] [ 81%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia]
tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-]
[gw2] [ 81%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-]
tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a]
[gw6] [ 81%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
[gw2] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a]
tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-]
tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet]
[gw6] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-]
tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab]
[gw6] [ 82%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc]
[gw6] [ 82%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc]
tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc]
[gw6] [ 83%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc]
[gw2] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0]
[gw6] [ 83%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab]
[gw2] [ 83%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1]
[gw3] [ 83%] FAILED tests/test_compression/test_common.py::test_simmetry[alg2]
tests/test_compression/test_common.py::test_simmetry[alg3]
[gw6] [ 84%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-]
tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-]
[gw2] [ 84%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-]
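The lcsseq/lcsstr expectations above are the common subsequence or substring strings themselves (e.g. [test-text-tet], [bc-abcd-bc]). A small sketch under that reading of the test ids; textdistance is the package under test, the call/similarity behaviour is inferred from these parametrizations:

    import textdistance

    # Calling the sequence algorithms appears to return the match itself,
    # per the ids above; .similarity() is then the length of that match.
    assert textdistance.lcsseq('test', 'text') == 'tet'
    assert textdistance.lcsstr('bc', 'abcd') == 'bc'
    assert textdistance.lcsseq.similarity('test', 'text') == 3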
tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST]
[gw6] [ 84%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-]
[gw2] [ 84%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST]
tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST]
tests/test_token/test_bag.py::test_distance[qwe-qwe-0]
[gw2] [ 85%] PASSED tests/test_token/test_bag.py::test_distance[qwe-qwe-0]
[gw6] [ 85%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST]
tests/test_token/test_bag.py::test_distance[qwe-erty-3]
tests/test_token/test_bag.py::test_distance[qwe-ewq-0]
[gw2] [ 85%] PASSED tests/test_token/test_bag.py::test_distance[qwe-erty-3]
[gw6] [ 85%] PASSED tests/test_token/test_bag.py::test_distance[qwe-ewq-0]
tests/test_token/test_cosine.py::test_distance[test-text-0.75]
tests/test_token/test_bag.py::test_distance[qwe-rtys-4]
[gw2] [ 86%] PASSED tests/test_token/test_cosine.py::test_distance[test-text-0.75]
[gw6] [ 86%] PASSED tests/test_token/test_bag.py::test_distance[qwe-rtys-4]
tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595]
tests/test_token/test_jaccard.py::test_distance[test-text-0.6]
[gw2] [ 86%] PASSED tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595]
[gw6] [ 86%] PASSED tests/test_token/test_jaccard.py::test_distance[test-text-0.6]
tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625]
tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333]
[gw6] [ 87%] PASSED tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625]
[gw2] [ 87%] PASSED tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333]
tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set
tests/test_token/test_jaccard.py::test_compare_with_tversky
[gw4] [ 87%] FAILED tests/test_external.py::test_qval[2-Jaro]
tests/test_external.py::test_qval[2-JaroWinkler]
[gw3] [ 87%] FAILED tests/test_compression/test_common.py::test_simmetry[alg3]
tests/test_compression/test_common.py::test_simmetry[alg4]
[gw5] [ 88%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg5]
tests/test_compression/test_common.py::test_is_normalized[alg6]
[gw7] [ 88%] FAILED tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor
tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor
[gw1] [ 88%] FAILED tests/test_common.py::test_normalization_range[alg23]
tests/test_compression/test_common.py::test_normalized_by_one[alg6]
[gw3] [ 88%] FAILED tests/test_compression/test_common.py::test_simmetry[alg4]
tests/test_compression/test_common.py::test_simmetry[alg5]
[gw6] [ 89%] PASSED tests/test_token/test_jaccard.py::test_compare_with_tversky
tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667]
[gw6] [ 89%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667]
tests/test_token/test_overlap.py::test_distance[test-text-0.75]
[gw6] [ 89%] PASSED tests/test_token/test_overlap.py::test_distance[test-text-0.75]
tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666]
[gw6] [ 89%] PASSED tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666]
tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334]
[gw6] [ 90%] PASSED tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334]
tests/test_token/test_sorensen.py::test_distance[test-text-0.75]
[gw6] [ 90%] PASSED tests/test_token/test_sorensen.py::test_distance[test-text-0.75]
tests/test_token/test_sorensen.py::test_compare_with_tversky
[gw5] [ 90%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg6]
tests/test_compression/test_common.py::test_normalized_by_one[alg0]
[gw7] [ 90%] PASSED tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor
tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor
[gw2] [ 91%] FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set
tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805]
[gw2] [ 91%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805]
[gw1] [ 91%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6]
tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1]
[gw1] [ 91%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1]
tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0]
[gw1] [ 91%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0]
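The test_compare_with_tversky cases above check the textbook reduction: with both weights set to 1, the Tversky index equals Jaccard, and with 0.5 it equals Sorensen-Dice. A hedged sketch, assuming Tversky accepts explicit per-side weights through ks (its repr in the failures below shows a default of ks=repeat(1)):

    import math
    import textdistance

    # Assumption: ks=[alpha, beta]; alpha = beta = 1 -> Jaccard,
    # alpha = beta = 0.5 -> Sorensen-Dice.
    jaccard_like = textdistance.Tversky(ks=[1, 1])
    dice_like = textdistance.Tversky(ks=[0.5, 0.5])

    assert math.isclose(jaccard_like.similarity('test', 'text'),
                        textdistance.jaccard.similarity('test', 'text'))
    assert math.isclose(dice_like.similarity('test', 'text'),
                        textdistance.sorensen.similarity('test', 'text'))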
[gw6] [ 92%] FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky
tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6]
[gw1] [ 92%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6]
tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor
tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set
[gw4] [ 92%] FAILED tests/test_external.py::test_qval[2-JaroWinkler]
tests/test_external.py::test_qval[3-Jaro]
[gw1] [ 92%] PASSED tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor
tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor
[gw5] [ 93%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg0]
tests/test_compression/test_common.py::test_normalized_by_one[alg1]
[gw0] [ 93%] FAILED tests/test_common.py::test_normalization_range[alg8]
tests/test_common.py::test_normalization_range[alg9]
[gw3] [ 93%] FAILED tests/test_compression/test_common.py::test_simmetry[alg5]
tests/test_compression/test_common.py::test_simmetry[alg6]
[gw6] [ 93%] FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set
[gw4] [ 94%] FAILED tests/test_external.py::test_qval[3-Jaro]
tests/test_external.py::test_qval[3-JaroWinkler]
[gw7] [ 94%] FAILED tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor
tests/test_compression/test_sqrt_ncd.py::test_normalization_range
[gw1] [ 94%] PASSED tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor
tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor
[gw7] [ 94%] FAILED tests/test_compression/test_sqrt_ncd.py::test_normalization_range
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1]
[gw7] [ 95%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1]
[gw7] [ 95%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4]
[gw7] [ 95%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2]
[gw7] [ 95%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2]
[gw0] [ 96%] PASSED tests/test_common.py::test_normalization_range[alg9]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1]
tests/test_common.py::test_normalization_range[alg10]
[gw7] [ 96%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1]
[gw5] [ 96%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg1]
tests/test_compression/test_common.py::test_normalized_by_one[alg2]
[gw3] [ 96%] FAILED tests/test_compression/test_common.py::test_simmetry[alg6]
tests/test_compression/test_common.py::test_is_normalized[alg0]
[gw4] [ 97%] FAILED tests/test_external.py::test_qval[3-JaroWinkler]
[gw1] [ 97%] FAILED tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor
tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor
[gw0] [ 97%] FAILED tests/test_common.py::test_normalization_range[alg10]
tests/test_common.py::test_normalization_range[alg11]
[gw1] [ 97%] FAILED tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor
tests/test_compression/test_entropy_ncd.py::test_normalization_range
[gw3] [ 98%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg0]
[gw5] [ 98%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg2]
tests/test_compression/test_common.py::test_normalized_by_one[alg3]
[gw1] [ 98%] FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range
tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503]
[gw1] [ 98%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503]
tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1]
[gw1] [ 99%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1]
[gw0] [ 99%] FAILED tests/test_common.py::test_normalization_range[alg11]
[gw5] [ 99%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg3]
tests/test_compression/test_common.py::test_normalized_by_one[alg4]
[gw5] [ 99%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg4]
tests/test_compression/test_common.py::test_normalized_by_one[alg5]
[gw5] [100%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg5]
=================================== FAILURES ===================================
_______________________ test_normalization_by_one[alg0] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.00 seconds (0 invalid ones and 1 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_common.py:60: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(131923082755347069155824072301936116055) to this test or run pytest with --hypothesis-seed=131923082755347069155824072301936116055 to reproduce this failure.
_______________________ test_normalization_by_one[alg12] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.09 seconds (0 invalid ones and 1 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_common.py:60: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(210590080623293233275014084980590112654) to this test or run pytest with --hypothesis-seed=210590080623293233275014084980590112654 to reproduce this failure.
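Both FailedHealthCheck reports name the same two remedies: suppress HealthCheck.too_slow, or bound the generated examples. A minimal sketch (HealthCheck, settings, and given are standard Hypothesis APIs; the ALGS parametrization and test body are elided, and max_size=64 is an arbitrary bound chosen here):

    from hypothesis import HealthCheck, given, settings, strategies as st

    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(left=st.text(max_size=64), right=st.text(max_size=64))
    def test_normalization_by_one(left, right):
        ...  # body elided; see tests/test_common.py:60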
________________________ test_normalization_range[alg0] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 109 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 109 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent an
        ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or it
        might have placed ``data`` in an unsuccessful state and then swallowed
        the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find

        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 109 bytes, frozen)
function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffffa63aa340>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 109 bytes, frozen)

                            report(printer.getvalue())
>                           return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '§c\U000a4618\x91CÍ\U00044681ò\x9aÅF', right = 'ï¯\U000acb65\U0010377d^'
alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('§c\U000a4618\x91CÍ\U00044681ò\x9aÅF', 'ï¯\U000acb65\U0010377d^', Bag({'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 2, start = 7797193.590135839, result = None
finish = 7797194.096075733, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=505940)
current_deadline = datetime.timedelta(microseconds=250000)

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 505.94ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(VALID, 109 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 505.94ms, which exceeds the deadline of 200.00ms'), "args = ('§c\\U000a4618\\x91CÍ\\U0004...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='§c\U000a4618\x91CÍ\U00044681ò\x9aÅF', right='ï¯\U000acb65\U0010377d^', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_range(
E               alg=Bag({'qval': 1, 'external': True}),
E               left='§c\U000a4618\x91CÍ\U00044681ò\x9aÅF',
E               right='ï¯\U000acb65\U0010377d^',
E           )
E           Unreliable test timings! On an initial run, this test took 505.94ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
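The Flaky report above names the fix directly: under a loaded builder the first run blows the 200ms example deadline and the replay does not. A minimal sketch of the suggested @settings(deadline=None) (the test body and the alg parametrization are elided):

    from hypothesis import given, settings, strategies as st

    @settings(deadline=None)  # the relaxation the report itself recommends
    @given(left=st.text(), right=st.text())
    def test_normalization_range(left, right):
        ...  # body elided; see tests/test_common.py:50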
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 14 bytes, frozen) function = .run at 0xffffff8e046340> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 14 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U0005741a', right = '\x11' alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0005741a', '\x11', Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797216.576436995, result = None finish = 7797223.755495319, internal_draw_time = 0 runtime = datetime.timedelta(seconds=7, microseconds=179058) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 7179.06ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 14 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 7179.06ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0005741a', '\\x11', Ham...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U0005741a', right='\x11', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}), E left='\U0005741a', E right='\x11', E ) E Unreliable test timings! On an initial run, this test took 7179.06ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.50 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg0] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 19 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 19 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
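The same relaxation can also be applied suite-wide rather than per test; register_profile and load_profile are standard Hypothesis APIs, and the profile name here is an arbitrary choice:

    from hypothesis import HealthCheck, settings

    # e.g. in conftest.py; "rpmbuild" is just an illustrative profile name
    settings.register_profile(
        "rpmbuild",
        deadline=None,
        suppress_health_check=[HealthCheck.too_slow],
    )
    settings.load_profile("rpmbuild")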
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 19 bytes, frozen) function = .run at 0xffffff7febf420> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 19 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '¡õ¥Â', alg = Bag({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('¡õ¥Â', Bag({'qval': 1, 'external': True})), kwargs = {} initial_draws = 1, start = 7797227.789979176, result = None finish = 7797228.290155171, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=500176) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 500.18ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Bag({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 19 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 500.18ms, which exceeds the deadline of 200.00ms'), "args = ('¡õ¥Â', Bag({'qval': 1, 'ext...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='¡õ¥Â', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Bag({'qval': 1, 'external': True}), E text='¡õ¥Â', E ) E Unreliable test timings! On an initial run, this test took 500.18ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 4.20 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg13] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 99 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 99 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
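The locals above show current_deadline = 250000 microseconds against a configured deadline of 200.00ms. That matches the wrapper source kept earlier: during non-final runs Hypothesis pads the deadline by 25% via (deadline // 4) * 5, and only the final replay enforces the strict value. The arithmetic, checked against the values recorded in this log:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)  # settings.deadline in this run

    # Non-final runs get a 25% grace margin: (deadline // 4) * 5 == 1.25 * deadline.
    current_deadline = (deadline // 4) * 5
    print(current_deadline)  # 0:00:00.250000, matching the locals above

    # The 500.18ms observed for test_normalization_same exceeds even the
    # padded deadline, so DeadlineExceeded is raised during generation.
    runtime = datetime.timedelta(microseconds=500176)
    assert runtime >= current_deadline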
_______________________ test_normalization_by_one[alg13] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 99 bytes, frozen)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
>   result = self.execute_once(data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
left = '⒈IQ𘅧\x19\x87𤲮\U0008bd93h敩', right = 'Ñ93\x8aN\U0004c547Ï'
alg = Overlap({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('⒈IQ𘅧\x19\x87𤲮\U0008bd93h敩', 'Ñ93\x8aN\U0004c547Ï', Overlap({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 7797222.359610334, result = None
finish = 7797222.880881228, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=521271)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 521.27ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Overlap({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
data = ConjectureData(VALID, 99 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 521.27ms, which exceeds the deadline of 200.00ms'), "args = ('⒈IQ𘅧\\x19\\x87𤲮\\U0008bd93h...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='⒈IQ𘅧\x19\x87𤲮\U0008bd93h敩', right='Ñ93\x8aN\U0004c547Ï', alg=Overlap({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_by_one(
E           alg=Overlap({'qval': 1, 'as_set': False, 'external': True}),
E           left='⒈IQ𘅧\x19\x87𤲮\U0008bd93h敩',
E           right='Ñ93\x8aN\U0004c547Ï',
E       )
E       Unreliable test timings! On an initial run, this test took 521.27ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.01 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 10 bytes, frozen) function = .run at 0xffffff8be0d4e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 10 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '@0' alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '@0', Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797215.958299802, result = None finish = 7797216.274324498, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=316025) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 316.02ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 10 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 316.02ms, which exceeds the deadline of 200.00ms'), "args = ('', '@0', Tversky({'qval': 1...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right='@0', alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}), E left='', E right='@0', E ) E Unreliable test timings! On an initial run, this test took 316.02ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.53 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg1] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 12 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 12 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 12 bytes, frozen) function = .run at 0xffffffa6343a60> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 12 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'ä', right = 'á' alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('ä', 'á', Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797236.317612286, result = None finish = 7797245.946817184, internal_draw_time = 0 runtime = datetime.timedelta(seconds=9, microseconds=629205) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 9629.20ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 12 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 9629.20ms, which exceeds the deadline of 200.00ms'), "args = ('ä', 'á', Hamming({'qval': ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='ä', right='á', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}), E left='ä', E right='á', E ) E Unreliable test timings! On an initial run, this test took 9629.20ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.67 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg13] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 7 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 7 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 7 bytes, frozen) function = .run at 0xffffff7d47b2e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 7 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '±±', alg = Overlap({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('±±', Overlap({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 1, start = 7797245.981102283, result = None finish = 7797246.482664578, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=501562) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 501.56ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Overlap({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 7 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 501.56ms, which exceeds the deadline of 200.00ms'), "args = ('±±', Overlap({'qval': 1, 'a...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='±±', alg=Overlap({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_same(
E               alg=Overlap({'qval': 1, 'as_set': False, 'external': True}),
E               text='±±',
E           )
E           Unreliable test timings! On an initial run, this test took 501.56ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.31 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg14] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 173 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire
        engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 173 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    [execute_once source identical to the listing in the first failure above]
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 173 bytes, frozen)
function = .run at 0xffffff75fe4860>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 173 bytes, frozen)

    [run source identical to the listing in the first failure above]
>                       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '=Ò÷ÈÉm\U000fb1fb\U0010d769EÝ\U000edaf5®¨'
right = '\x04\x0c\U000d0fb2H\x8e4*¤¨𫰳Å\U000a8622\\º#'
alg = Cosine({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('=Ò÷ÈÉm\U000fb1fb\U0010d769EÝ\U000edaf5®¨', '\x04\x0c\U000d0fb2H\x8e4*¤¨𫰳Å\U000a8622\\º#', Cosine({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 7797263.566801397, result = None
finish = 7797264.076619391, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=509818)
current_deadline = datetime.timedelta(microseconds=250000)

    [test wrapper source identical to the listing in the first failure above]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 509.82ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 173 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 509.82ms, which exceeds the deadline of 200.00ms'), "args = ('=Ò÷ÈÉm\\U000fb1fb\\U0010d76...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [final execute_once run and diagnostic report identical to the first failure above]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='=Ò÷ÈÉm\U000fb1fb\U0010d769EÝ\U000edaf5®¨', right='\x04\x0c\U000d0fb2H\x8e4*¤¨𫰳Å\U000a8622\\º#', alg=Cosine({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=Cosine({'qval': 1, 'as_set': False, 'external': True}),
E               left='=Ò÷ÈÉm\U000fb1fb\U0010d769EÝ\U000edaf5®¨',
E               right='\x04\x0c\U000d0fb2H\x8e4*¤¨𫰳Å\U000a8622\\º#',
E           )
E           Unreliable test timings! On an initial run, this test took 509.82ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.91 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
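The diagnostic above already names the standard fix. A minimal sketch of what it looks like at the test level, assuming nothing about tests/test_common.py beyond the shape shown in the tracebacks (the test name and body here are placeholders, not the package's code):

    import textdistance
    from hypothesis import given, settings, strategies as st

    @given(text=st.text())
    @settings(deadline=None)  # turn the per-example deadline off, as the report suggests
    def test_normalization_same_sketch(text):
        # same shape as the failing property: a normalized metric on equal inputs
        assert textdistance.levenshtein.normalized_distance(text, text) == 0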
_______________________ test_normalization_range[alg13] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 80 bytes, frozen)

[traceback through the hypothesis internals identical to the failures above: _execute_once_for_engine (core.py:850) -> execute_once (core.py:789) -> default_new_style_executor (executors.py:47) -> run (core.py:785) -> test]

left = '0¸\x91u"¨êþ\x02Ç', right = '²\U0009e34c\U0003fdce\x8d'
alg = Overlap({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('0¸\x91u"¨êþ\x02Ç', '²\U0009e34c\U0003fdce\x8d', Overlap({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 7797286.396898855, result = None
finish = 7797286.88180615, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=484907)
current_deadline = datetime.timedelta(microseconds=250000)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 484.91ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 80 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 484.91ms, which exceeds the deadline of 200.00ms'), 'args = (\'0¸\\x91u"¨êþ\\x02Ç\', \'²\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n')
example_kwargs = None

[final execute_once run identical to the first failure above]
>   raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='0¸\x91u"¨êþ\x02Ç', right='²\U0009e34c\U0003fdce\x8d', alg=Overlap({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=Overlap({'qval': 1, 'as_set': False, 'external': True}),
E       left='0¸\x91u"¨êþ\x02Ç',
E       right='²\U0009e34c\U0003fdce\x8d',
E   )
E   Unreliable test timings! On an initial run, this test took 484.91ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.76 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
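Note that current_deadline = datetime.timedelta(microseconds=250000) in the locals is not the configured 200ms: per the wrapper source in the first traceback, non-final runs inflate the deadline by a quarter before comparing. A quick check of that arithmetic:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)  # the suite's configured deadline
    current_deadline = (deadline // 4) * 5           # the inflation step from the wrapper
    print(current_deadline)                          # 0:00:00.250000 -- the value in the locals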
________________________ test_normalization_range[alg2] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 24 bytes, frozen)

[traceback through the hypothesis internals identical to the failures above: _execute_once_for_engine (core.py:850) -> execute_once (core.py:789) -> default_new_style_executor (executors.py:47) -> run (core.py:785) -> test]

left = '𨓬u', right = '\U00081a5f'
alg = Levenshtein({'qval': 1, 'test_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('𨓬u', '\U00081a5f', Levenshtein({'qval': 1, 'test_func': , 'external': True}))
kwargs = {}, initial_draws = 2, start = 7797282.366514097, result = None
finish = 7797283.07845249, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=711938)
current_deadline = datetime.timedelta(microseconds=250000)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 711.94ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 24 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 711.94ms, which exceeds the deadline of 200.00ms'), "args = ('𨓬u', '\\U00081a5f', Levensh...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[final execute_once run identical to the first failure above]
>   raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='𨓬u', right='\U00081a5f', alg=Levenshtein({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=Levenshtein({'qval': 1, 'test_func': , 'external': True}),
E       left='𨓬u',
E       right='\U00081a5f',
E   )
E   Unreliable test timings! On an initial run, this test took 711.94ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.70 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
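The runtimes being compared also exclude time spent drawing from strategies: the wrapper subtracts the data.draw_times accumulated during the call. A self-contained imitation of that bookkeeping (timed_call and its arguments are illustrative, not Hypothesis API):

    import datetime
    import time

    def timed_call(fn, draw_times):
        # measure wall time, then discount draws recorded during the call
        initial_draws = len(draw_times)
        start = time.perf_counter()
        result = fn()
        finish = time.perf_counter()
        internal_draw_time = sum(draw_times[initial_draws:])
        return result, datetime.timedelta(seconds=finish - start - internal_draw_time)

    _, runtime = timed_call(lambda: sum(range(10**6)), draw_times=[])
    print(runtime < datetime.timedelta(milliseconds=200))  # did it beat the deadline?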
_______________________ test_normalization_by_one[alg3] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 8 bytes, frozen)

[traceback through the hypothesis internals identical to the failures above: _execute_once_for_engine (core.py:850) -> execute_once (core.py:789) -> default_new_style_executor (executors.py:47) -> run (core.py:785) -> test]

left = '\U0005d97a', right = ''
alg = DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U0005d97a', '', DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}))
kwargs = {}, initial_draws = 2, start = 7797284.261890377, result = None
finish = 7797284.578971574, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=317081)
current_deadline = datetime.timedelta(microseconds=250000)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 317.08ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 8 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 317.08ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0005d97a', '', DamerauLe...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[final execute_once run identical to the first failure above]
>   raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U0005d97a', right='', alg=DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_by_one(
E       alg=DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}),
E       left='\U0005d97a',
E       right='',
E   )
E   Unreliable test timings! On an initial run, this test took 317.08ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.61 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
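Every flaky case above was built with 'external': True and is slow only on its first call. One plausible reading, an assumption rather than something this log proves, is a one-off cost of probing optional accelerated backends on first use; pinning external=False keeps the pure-Python path:

    import textdistance

    alg = textdistance.Levenshtein(qval=1, external=False)  # skip optional C backends
    print(alg.normalized_distance('test', 'text'))          # 0.25: one edit over max length 4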
________________________ test_normalization_same[alg2] _________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 23 bytes, frozen)

[traceback through the hypothesis internals identical to the failures above: _execute_once_for_engine (core.py:850) -> execute_once (core.py:789) -> default_new_style_executor (executors.py:47) -> run (core.py:785) -> test]

text = '¶ÿ\x16\U00082fe9û'
alg = Levenshtein({'qval': 1, 'test_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('¶ÿ\x16\U00082fe9û', Levenshtein({'qval': 1, 'test_func': , 'external': True}))
kwargs = {}, initial_draws = 1, start = 7797304.829900059, result = None
finish = 7797305.144803656, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=314904)
current_deadline = datetime.timedelta(microseconds=250000)

>   raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 314.90ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 23 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 314.90ms, which exceeds the deadline of 200.00ms'), "args = ('¶ÿ\\x16\\U00082fe9û', Leven...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[final execute_once run identical to the first failure above]
>   raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='¶ÿ\x16\U00082fe9û', alg=Levenshtein({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_same(
E       alg=Levenshtein({'qval': 1, 'test_func': , 'external': True}),
E       text='¶ÿ\x16\U00082fe9û',
E   )
E   Unreliable test timings! On an initial run, this test took 314.90ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.30 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='¶ÿ\x16\U00082fe9û', alg=Levenshtein({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Levenshtein({'qval': 1, 'test_func': , 'external': True}), E text='¶ÿ\x16\U00082fe9û', E ) E Unreliable test timings! On an initial run, this test took 314.90ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.30 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg15] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 45 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 45 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 45 bytes, frozen) function = .run at 0xffffff7d3f58a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 45 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\x98uÄC\x93Õ-\U000f599c\r' alg = StrCmp95({'long_strings': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x98uÄC\x93Õ-\U000f599c\r', StrCmp95({'long_strings': False, 'external': True})) kwargs = {}, initial_draws = 1, start = 7797326.366282631, result = None finish = 7797326.688553627, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=322271) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 322.27ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = StrCmp95({'long_strings': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 45 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 322.27ms, which exceeds the deadline of 200.00ms'), "args = ('\\x98uÄC\\x93Õ-\\U000f599c\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\x98uÄC\x93Õ-\U000f599c\r', alg=StrCmp95({'long_strings': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=StrCmp95({'long_strings': False, 'external': True}), E text='\x98uÄC\x93Õ-\U000f599c\r', E ) E Unreliable test timings! On an initial run, this test took 322.27ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.40 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg16] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 16 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 16 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) function = .run at 0xffffff75d60b80> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'Õ', right = '\x02]' alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('Õ', '\x02]', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797317.368528326, result = None finish = 7797324.878737747, internal_draw_time = 0 runtime = datetime.timedelta(seconds=7, microseconds=510209) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 7510.21ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 16 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 7510.21ms, which exceeds the deadline of 200.00ms'), "args = ('Õ', '\\x02]', MongeElkan({...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, 
expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='Õ', right='\x02]', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}), E left='Õ', E right='\x02]', E ) E Unreliable test timings! On an initial run, this test took 7510.21ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.58 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg14] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 38 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 38 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 38 bytes, frozen) function = .run at 0xffffff8bcc6480> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 38 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000741f7¾\U0003c513', right = '\x92¨' alg = Cosine({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000741f7¾\U0003c513', '\x92¨', Cosine({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797332.370554267, result = None finish = 7797332.686613464, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=316059) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 316.06ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Cosine({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 38 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 316.06ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000741f7¾\\U0003c513', '...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U000741f7¾\U0003c513', right='\x92¨', alg=Cosine({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Cosine({'qval': 1, 'as_set': False, 'external': True}), E left='\U000741f7¾\U0003c513', E right='\x92¨', E ) E Unreliable test timings! On an initial run, this test took 316.06ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.44 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg4] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 5 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 5 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) function = .run at 0xffffff7fdb9800> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\x14' alg = Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x14', Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})) kwargs = {}, initial_draws = 1, start = 7797354.979883327, result = None finish = 7797355.501030422, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=521147) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 521.15ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 521.15ms, which exceeds the deadline of 200.00ms'), "args = ('\\x14', Jaro({'qval': 1, 'l...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\x14', alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}), E text='\x14', E ) E Unreliable test timings! On an initial run, this test took 521.15ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.57 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg15] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 148 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 148 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 148 bytes, frozen) function = .run at 0xffffff8bcc68e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 148 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '×UGM\U00092ad0Ó\x95:ìlZ', right = '?h\\𠢜gzþfÙ¤X¡(ß8\x9dð' alg = StrCmp95({'long_strings': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('×UGM\U00092ad0Ó\x95:ìlZ', '?h\\𠢜gzþfÙ¤X¡(ß8\x9dð', StrCmp95({'long_strings': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797364.707847424, result = None finish = 7797365.14721312, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=439366) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 439.37ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = StrCmp95({'long_strings': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 148 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 439.37ms, which exceeds the deadline of 200.00ms'), "args = ('×UGM\\U00092ad0Ó\\x95:ìlZ',...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
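Note: every Flaky failure in this log follows the pattern visible in the `test` wrapper above. Hypothesis times each example (subtracting data-draw time) and, on non-final runs, allows a 25% grace margin over the configured 200 ms deadline, which is why the locals show current_deadline = 250000 microseconds. The final reproduction run enforces the raw deadline; an example that was slow once (439.37 ms here) but fast on replay (3.49 ms) therefore ends in Flaky rather than a plain DeadlineExceeded. A minimal sketch of that arithmetic, using values from this log (the helper name is ours, not Hypothesis API):

    # Sketch of the deadline slack seen in the frames above.
    import datetime

    def effective_deadline(deadline: datetime.timedelta, is_final: bool) -> datetime.timedelta:
        # Non-final runs get (deadline // 4) * 5, i.e. 1.25x the configured deadline.
        return deadline if is_final else (deadline // 4) * 5

    deadline = datetime.timedelta(milliseconds=200)
    assert effective_deadline(deadline, is_final=False) == datetime.timedelta(microseconds=250000)
    assert effective_deadline(deadline, is_final=True) == datetime.timedelta(milliseconds=200)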
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='×UGM\U00092ad0Ó\x95:ìlZ', right='?h\\𠢜gzþfÙ¤X¡(ß8\x9dð', alg=StrCmp95({'long_strings': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=StrCmp95({'long_strings': False, 'external': True}), E left='×UGM\U00092ad0Ó\x95:ìlZ', E right='?h\\𠢜gzþfÙ¤X¡(ß8\x9dð', E ) E Unreliable test timings! On an initial run, this test took 439.37ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg16] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 22 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 22 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 22 bytes, frozen) function = .run at 0xffffff7d5ba8e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 22 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\U0006b699\x03gè' alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0006b699\x03gè', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) kwargs = {}, initial_draws = 1, start = 7797376.788989496, result = None finish = 7797377.292123691, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=503134) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 503.13ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 22 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 503.13ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0006b699\\x03gè', MongeE...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as 
input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
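Note: when a reported falsifying example needs to be retried locally, it can be pinned with @hypothesis.example so Hypothesis always replays it. A hedged sketch using the example reported above; the fixed alg and the assertion body are placeholders, not the actual tests/test_common.py source:

    # Sketch: always replay a reported falsifying example.
    import hypothesis
    import hypothesis.strategies
    import textdistance

    @hypothesis.example(text='\U0006b699\x03gè')  # falsifying example from this log
    @hypothesis.given(text=hypothesis.strategies.text())
    def test_normalization_same(text):
        alg = textdistance.MongeElkan()
        # Placeholder property: a string has zero normalized distance to itself.
        assert alg.normalized_distance(text, text) == 0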
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U0006b699\x03gè', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}), E text='\U0006b699\x03gè', E ) E Unreliable test timings! On an initial run, this test took 503.13ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.98 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg5] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 46 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 46 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 46 bytes, frozen) function = .run at 0xffffff8dd7eb60> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 46 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U0009b2a8yÍ\U0008293f¶', right = '}SËc' alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0009b2a8yÍ\U0008293f¶', '}SËc', JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797365.796893413, result = None finish = 7797366.729925703, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=933032) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 933.03ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 46 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 933.03ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0009b2a8yÍ\\U0008293f¶',...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
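Note: the remedy the error message itself suggests can be applied per test with @hypothesis.settings. A hedged sketch in the shape of the tests/test_common.py decorators seen above; ALGS and the assertion are placeholders for the real test module:

    # Sketch: per-test deadline removal, following the error message's advice.
    import hypothesis
    import hypothesis.strategies
    import pytest
    import textdistance

    ALGS = [textdistance.JaroWinkler(), textdistance.StrCmp95()]  # placeholder subset

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(deadline=None)  # no per-example time limit
    @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
    def test_normalization_range(left, right, alg):
        # Placeholder property: a normalized distance stays within [0, 1].
        assert 0 <= alg.normalized_distance(left, right) <= 1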
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U0009b2a8yÍ\U0008293f¶', right='}SËc', alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}), E left='\U0009b2a8yÍ\U0008293f¶', E right='}SËc', E ) E Unreliable test timings! On an initial run, this test took 933.03ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.78 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg5] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 98 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 98 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 98 bytes, frozen) function = .run at 0xffffffa624eac0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 98 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '¼\U00083ed1\x8a\x80\x11[\x00\x12´ê´¸𨉰' right = 'ÿk\U000c9659\U000fc186¤°' alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('¼\U00083ed1\x8a\x80\x11[\x00\x12´ê´¸𨉰', 'ÿk\U000c9659\U000fc186¤°', JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797392.389918231, result = None finish = 7797392.686416428, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=296498) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 296.50ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 98 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 296.50ms, which exceeds the deadline of 200.00ms'), "args = ('¼\\U00083ed1\\x8a\\x80\\x11...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. 
If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='¼\U00083ed1\x8a\x80\x11[\x00\x12´ê´¸𨉰', right='ÿk\U000c9659\U000fc186¤°', alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}), E left='¼\U00083ed1\x8a\x80\x11[\x00\x12´ê´¸𨉰', E right='ÿk\U000c9659\U000fc186¤°', E ) E Unreliable test timings! On an initial run, this test took 296.50ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.77 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg16] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) function = .run at 0xffffff8bcc40e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '' alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797386.367445494, result = None finish = 7797386.680367191, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=312922) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 312.92ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 312.92ms, which exceeds the deadline of 200.00ms'), "args = ('', '', MongeElkan({'algorit...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, 
example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
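Note: every failure above is the same timing artifact on a loaded builder (even the empty-string example took 312.92 ms once and 0.68 ms on replay), not a real bug in textdistance. The more maintainable fix is a suite-wide Hypothesis settings profile. A minimal sketch, assuming a conftest.py next to the tests; the profile name "rpmbuild" is our choice:

    # conftest.py
    from hypothesis import settings

    # Register and activate a profile with no per-example deadline; slow,
    # shared build hosts make wall-clock deadlines unreliable.
    settings.register_profile("rpmbuild", deadline=None)
    settings.load_profile("rpmbuild")

The profile can also be selected at run time with pytest's --hypothesis-profile=rpmbuild option or the HYPOTHESIS_PROFILE environment variable instead of the load_profile call.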
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right='', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}), E left='', E right='', E ) E Unreliable test timings! On an initial run, this test took 312.92ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.68 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg17] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 4 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 4 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) function = .run at 0xffffff7d47b420> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '0', alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', MRA({'qval': 1, 'external': True})), kwargs = {}, initial_draws = 1 start = 7797399.359584757, result = None, finish = 7797399.686204153 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=326619) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 326.62ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 4 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 326.62ms, which exceeds the deadline of 200.00ms'), "args = ('0', MRA({'qval': 1, 'extern...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
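A side note on the numbers in the locals above: the configured deadline is 200 ms, yet current_deadline shows 250000 microseconds. That is the `(current_deadline // 4) * 5` line in the wrapper at work; non-final runs get a 25 % grace margin. A stdlib-only check of that arithmetic:

import datetime

deadline = datetime.timedelta(milliseconds=200)
relaxed = (deadline // 4) * 5  # the scaling applied on non-final runs
assert relaxed == datetime.timedelta(microseconds=250000)
# this failure's measured runtime still exceeds even the relaxed limit
assert datetime.timedelta(microseconds=326619) >= relaxed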
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='0', alg=MRA({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=MRA({'qval': 1, 'external': True}), E text='0', E ) E Unreliable test timings! On an initial run, this test took 326.62ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.08 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_same[alg7] _________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

alg = LCSSeq({'qval': 1, 'test_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.11 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_common.py:71: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(117230985449369338694394271045570288832) to this test or run pytest with --hypothesis-seed=117230985449369338694394271045570288832 to reproduce this failure.
_______________________ test_normalization_by_one[alg18] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 37 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire
        engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 37 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent an
        ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
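The health-check failure above names both of its own remedies. A minimal sketch applying them together, suppressing only HealthCheck.too_slow and pinning the seed the log reports; the test body is elided because it is not shown in this log:

import hypothesis
import hypothesis.strategies
from hypothesis import HealthCheck

@hypothesis.seed(117230985449369338694394271045570288832)  # seed reported above
@hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
@hypothesis.given(text=hypothesis.strategies.text())
def test_normalization_same(text):
    ...  # elided: the real assertions live in tests/test_common.py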
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 37 bytes, frozen) function = .run at 0xffffff75fb1260> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 37 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000b1c9bí¤»', right = '\U0006ec5dt' alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000b1c9bí¤»', '\U0006ec5dt', Prefix({'qval': 1, 'sim_test': })) kwargs = {}, initial_draws = 2, start = 7797422.771059609, result = None finish = 7797423.283623303, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=512564) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 512.56ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 37 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 512.56ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000b1c9bí¤»', '\\U0006ec...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
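Figures such as "Test took 512.56ms" come straight from the wrapper shown above: wall-clock time around the call, minus the time Hypothesis spent drawing data inside it. A stdlib-only re-creation of that bookkeeping, with draw_times as a stand-in for data.draw_times:

import datetime
import time

draw_times = []                 # stand-in for data.draw_times (seconds per draw)
initial_draws = len(draw_times)
start = time.perf_counter()
time.sleep(0.01)                # stands in for the test body
finish = time.perf_counter()
internal_draw_time = sum(draw_times[initial_draws:])
runtime = datetime.timedelta(seconds=finish - start - internal_draw_time)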
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U000b1c9bí¤»', right='\U0006ec5dt', alg=Prefix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Prefix({'qval': 1, 'sim_test': }), E left='\U000b1c9bí¤»', E right='\U0006ec5dt', E ) E Unreliable test timings! On an initial run, this test took 512.56ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.45 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg18] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 6 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 6 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) function = .run at 0xffffff7d5bbc40> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\U000366d0' alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000366d0', Prefix({'qval': 1, 'sim_test': })) kwargs = {}, initial_draws = 1, start = 7797422.380023913, result = None finish = 7797422.68212411, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=302100) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 302.10ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 6 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 302.10ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000366d0', Prefix({'qval...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
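The alg values in these parametrized failures are textdistance algorithm objects, and their reprs double as constructor arguments. A hedged sketch rebuilding two of the configurations seen above; the callable-valued options (test_func, sim_test) were stripped from the log's reprs, so library defaults are used here:

import textdistance

prefix = textdistance.Prefix(qval=1)
monge_elkan = textdistance.MongeElkan(
    algorithm=textdistance.DamerauLevenshtein(qval=1, external=True),
    symmetric=False,
    qval=1,
)
print(monge_elkan.normalized_distance('', ''))  # the input pair from the first Flaky failure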
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U000366d0', alg=Prefix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Prefix({'qval': 1, 'sim_test': }), E text='\U000366d0', E ) E Unreliable test timings! On an initial run, this test took 302.10ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.55 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg6] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 66 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 66 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
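The @proxies(self.test) decorator on the timing wrapper above keeps the shim presentable as the original test, much like the standard library's functools.wraps (hypothesis's helper additionally mimics the signature). A sketch of the stdlib analog, not of hypothesis's internal helper:

import functools
import time

def timed(fn):
    @functools.wraps(fn)  # copy __name__, __doc__, etc. onto the wrapper
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        try:
            return fn(*args, **kwargs)
        finally:
            wrapper.last_runtime = time.perf_counter() - start
    return wrapper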
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 66 bytes, frozen) function = .run at 0xffffff8dff62a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 66 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000a9bb7\U000ccc21Ä\U000c25ab\U000e79e9', right = 'öS\x08ª³1' alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000a9bb7\U000ccc21Ä\U000c25ab\U000e79e9', 'öS\x08ª³1', MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797428.390561749, result = None finish = 7797428.883678044, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=493116) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 493.12ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 66 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 493.12ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000a9bb7\\U000ccc21Ä\\U0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. 
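The engine wrapper that keeps reappearing in these tracebacks only arms its settrace-based tracer when both Phase.shrink and Phase.explain are enabled. Those phases are a public settings knob; a minimal sketch limiting a test to explicit and generated examples only (body elided, only the knob is the point):

import hypothesis
import hypothesis.strategies
from hypothesis import Phase

@hypothesis.settings(phases=[Phase.explicit, Phase.generate])  # no shrink/explain passes
@hypothesis.given(text=hypothesis.strategies.text())
def test_generate_only(text):
    ...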
If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U000a9bb7\U000ccc21Ä\U000c25ab\U000e79e9', right='öS\x08ª³1', alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}), E left='\U000a9bb7\U000ccc21Ä\U000c25ab\U000e79e9', E right='öS\x08ª³1', E ) E Unreliable test timings! On an initial run, this test took 493.12ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.26 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg8] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('õ³^\x1c\U0006bdad«7\U000bb9baÀXlR)·', LCSStr({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 1, start = 7797446.576804756, result = None finish = 7797446.886733553, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=309929) current_deadline = timedelta(milliseconds=200) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 309.93ms, which exceeds the deadline of 200.00ms E Falsifying example: test_normalization_same( E alg=LCSStr({'qval': 1, 'external': True}), E text='õ³^\x1c\U0006bdad«7\U000bb9baÀXlR)·', E ) /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded _______________________ test_normalization_range[alg17] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 31 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. 
_______________________ test_normalization_range[alg17] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '\U0001db03', right = '\U0008c0cdîWu'
alg = MRA({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U0001db03', '\U0008c0cdîWu', MRA({'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 2, start = 7797427.36917576, result = None
finish = 7797427.682732656, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=313557)
current_deadline = datetime.timedelta(microseconds=250000)

        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 313.56ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U0001db03', right='\U0008c0cdîWu', alg=MRA({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_range(
E               alg=MRA({'qval': 1, 'external': True}),
E               left='\U0001db03',
E               right='\U0008c0cdîWu',
E           )
E           Unreliable test timings! On an initial run, this test took 313.56ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.28 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
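Every Flaky report in this run ends with the same advice: set deadline=None. A hypothetical patch to tests/test_common.py along those lines; the ALGS list and the test body here are stand-ins, since the real assertions are not visible in this log:

    import hypothesis
    import pytest
    import textdistance
    from hypothesis import strategies

    ALGS = [textdistance.LCSSeq(), textdistance.MRA()]   # stand-in for the real ALGS

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(deadline=None)                  # no per-example time limit
    @hypothesis.given(left=strategies.text(), right=strategies.text())
    def test_normalization_range(left, right, alg):
        nd = alg.normalized_distance(left, right)        # assumed assertion; the real
        assert 0 <= nd <= 1                              # test body is not shown here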
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 95 bytes, frozen) function = .run at 0xffffff8e046f20> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 95 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = ';tZHÀ©U', right = 'B)+\U0003d248¡\U000a2d20éî\x9e]CV' alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (';tZHÀ©U', 'B)+\U0003d248¡\U000a2d20éî\x9e]CV', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 7797448.399095137, result = None finish = 7797448.742422133, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=343327) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 343.33ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 95 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 343.33ms, which exceeds the deadline of 200.00ms'), "args = (';tZHÀ©U', 'B)+\\U0003d248¡\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left=';tZHÀ©U', right='B)+\U0003d248¡\U000a2d20éî\x9e]CV', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E left=';tZHÀ©U', E right='B)+\U0003d248¡\U000a2d20éî\x9e]CV', E ) E Unreliable test timings! On an initial run, this test took 343.33ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.22 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg7] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 52 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 52 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 52 bytes, frozen) function = .run at 0xffffffa5f83060> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 52 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'l1\U00070d014Ûì\\\U0008e3a0', right = '®' alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('l1\U00070d014Ûì\\\U0008e3a0', '®', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 7797448.405984337, result = None finish = 7797448.738866633, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=332882) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 332.88ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 52 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 332.88ms, which exceeds the deadline of 200.00ms'), "args = ('l1\\U00070d014Ûì\\\\\\U0008...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='l1\U00070d014Ûì\\\U0008e3a0', right='®', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E left='l1\U00070d014Ûì\\\U0008e3a0', E right='®', E ) E Unreliable test timings! On an initial run, this test took 332.88ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.25 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg18] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 170 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 170 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 170 bytes, frozen) function = .run at 0xffffff8bd5c4a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 170 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'o1\x8b\x90¶ø\U0009395e\U0009840c8\U0006d50b\x8b\x9b' right = '9¢V\U000ae453\\\x02üÂ\x19;é:/' alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('o1\x8b\x90¶ø\U0009395e\U0009840c8\U0006d50b\x8b\x9b', '9¢V\U000ae453\\\x02üÂ\x19;é:/', Prefix({'qval': 1, 'sim_test': })) kwargs = {}, initial_draws = 2, start = 7797458.379784631, result = None finish = 7797458.729244127, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=349459) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 349.46ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 170 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 349.46ms, which exceeds the deadline of 200.00ms'), "args = ('o1\\x8b\\x90¶ø\\U0009395e\\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='o1\x8b\x90¶ø\U0009395e\U0009840c8\U0006d50b\x8b\x9b', right='9¢V\U000ae453\\\x02üÂ\x19;é:/', alg=Prefix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Prefix({'qval': 1, 'sim_test': }), E left='o1\x8b\x90¶ø\U0009395e\U0009840c8\U0006d50b\x8b\x9b', E right='9¢V\U000ae453\\\x02üÂ\x19;é:/', E ) E Unreliable test timings! On an initial run, this test took 349.46ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.41 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg19] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 26 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 26 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 26 bytes, frozen) function = .run at 0xffffff75f10220> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 26 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '4⊂\U0006921d\U00102bc1' alg = Postfix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '4⊂\U0006921d\U00102bc1', Postfix({'qval': 1, 'sim_test': })) kwargs = {}, initial_draws = 2, start = 7797460.090112113, result = None finish = 7797460.482276609, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=392164) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 392.16ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Postfix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 26 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 392.16ms, which exceeds the deadline of 200.00ms'), "args = ('', '4⊂\\U0006921d\\U00102bc...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='4⊂\U0006921d\U00102bc1', alg=Postfix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Postfix({'qval': 1, 'sim_test': }), E left='', E right='4⊂\U0006921d\U00102bc1', E ) E Unreliable test timings! On an initial run, this test took 392.16ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.94 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg19] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 38 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 38 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
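The Flaky error above is Hypothesis's diagnostic for an unreproducible timing failure: the first call took 392.16ms and tripped the deadline, while the replay took 0.94ms and passed. The remedy the message itself proposes is deadline=None. A hedged sketch of how that could be applied in tests/test_common.py; the test name, strategies, and ALGS parametrization are taken from the log, while the settings placement and the single-entry ALGS list are assumptions:

    import hypothesis
    import hypothesis.strategies
    import pytest
    import textdistance

    ALGS = [textdistance.postfix]  # one example entry; the real list is in tests/test_common.py

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(deadline=None)  # disable the 200ms deadline for this test
    @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
    def test_normalization_by_one(left, right, alg):
        # Body is a stand-in "in spirit"; the real body is at tests/test_common.py:60.
        assert 0 <= alg.normalized_distance(left, right) <= 1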
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 38 bytes, frozen) function = .run at 0xffffff7d490720> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 38 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\U000a6d40\x07\x11\x847' alg = Postfix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000a6d40\x07\x11\x847', Postfix({'qval': 1, 'sim_test': })) kwargs = {}, initial_draws = 1, start = 7797462.179391891, result = None finish = 7797462.481097687, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=301706) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 301.71ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Postfix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 38 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 301.71ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000a6d40\\x07\\x11\\x847...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U000a6d40\x07\x11\x847', alg=Postfix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Postfix({'qval': 1, 'sim_test': }), E text='\U000a6d40\x07\x11\x847', E ) E Unreliable test timings! On an initial run, this test took 301.71ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.64 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg11] ________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.14 seconds (0 invalid ones and 1 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test_common.py:71: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(194618622332764635456361084619712224111) to this test or run pytest with --hypothesis-seed=194618622332764635456361084619712224111 to reproduce this failure. _______________________ test_normalization_by_one[alg21] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 5 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. 
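test_normalization_same[alg11] fails differently from the Flaky cases: a FailedHealthCheck, because only 9 valid examples were generated in 1.14 seconds. The log offers two remedies verbatim: suppress HealthCheck.too_slow for this test, or replay the failing generation with the printed seed. A sketch assuming the suppression is attached to the same test, with the ALGS entry and the assertion body as stand-ins:

    import hypothesis
    import hypothesis.strategies
    import pytest
    import textdistance
    from hypothesis import HealthCheck

    ALGS = [textdistance.sorensen]  # one example entry; the real list is in tests/test_common.py

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
    @hypothesis.given(text=hypothesis.strategies.text())
    def test_normalization_same(text, alg):
        # Plausible reading of the test's intent: identical strings have distance 0.
        assert alg.normalized_distance(text, text) == 0

    # Alternatively, pin the failing run for debugging, per the log's own hint:
    # @hypothesis.seed(194618622332764635456361084619712224111)
    # or: pytest --hypothesis-seed=194618622332764635456361084619712224111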
tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 5 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) function = .run at 0xffffff758f0ea0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '0' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '0', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 7797514.403634237, result = None finish = 7797514.883341732, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=479707) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 479.71ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), 
right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 479.71ms, which exceeds the deadline of 200.00ms'), "args = ('', '0', NeedlemanWunsch({'q...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! 
On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='0', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='', E right='0', E ) E Unreliable test timings! On an initial run, this test took 479.71ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.68 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg21] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 11 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 11 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
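The recurring "The above exception was the direct cause of the following exception" blocks in this log come from Hypothesis raising Flaky with `raise ... from exception`, which chains the original DeadlineExceeded as __cause__. A minimal, self-contained illustration of that chaining (the exception classes here are local stand-ins, not the hypothesis.errors ones):

    class DeadlineExceeded(Exception):
        pass

    class Flaky(Exception):
        pass

    try:
        try:
            raise DeadlineExceeded("Test took 392.16ms")
        except DeadlineExceeded as exc:
            # Explicit chaining: exc becomes __cause__ of the Flaky error,
            # which tracebacks render as "the direct cause of".
            raise Flaky("produces unreliable results") from exc
    except Flaky as flaky:
        assert isinstance(flaky.__cause__, DeadlineExceeded)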
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 11 bytes, frozen) function = .run at 0xffffff7d1a2980> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 11 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\U000c414cḁ' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000c414cḁ', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 1, start = 7797556.792274787, result = None finish = 7797557.304181082, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=511906) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 511.91ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 11 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 511.91ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000c414cḁ', NeedlemanWun...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U000c414cḁ', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E text='\U000c414cḁ', E ) E Unreliable test timings! On an initial run, this test took 511.91ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.27 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg22] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 65 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 65 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
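Every failure repeats the same timing wrapper from hypothesis/core.py: the test body is timed with time.perf_counter() and the time spent inside strategy draws is subtracted, so slow data generation is not billed against the deadline. A standalone sketch of that measurement, with names simplified from the code shown above:

    import datetime
    import time

    def timed_call(fn, draw_times, *args, **kwargs):
        # draw_times: durations of strategy draws, appended to during the call.
        initial_draws = len(draw_times)
        start = time.perf_counter()
        result = fn(*args, **kwargs)
        finish = time.perf_counter()
        # Only draws that happened during this particular call are subtracted.
        internal_draw_time = sum(draw_times[initial_draws:])
        runtime = datetime.timedelta(seconds=finish - start - internal_draw_time)
        return result, runtime

    # Example: a call that performs no draws is billed its full wall time.
    result, runtime = timed_call(sum, [], [1, 2, 3])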
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 65 bytes, frozen) function = .run at 0xffffff758ad760> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 65 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '"', right = '\x9f*\U0008fcda£iÄo6l\U00097750z' alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('"', '\x9f*\U0008fcda£iÄo6l\U00097750z', SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 7797553.392192123, result = None finish = 7797553.884808818, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=492617) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 492.62ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 65 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 492.62ms, which exceeds the deadline of 200.00ms'), 'args = (\'"\', \'\\x9f*\\U0008fcda£i...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='"', right='\x9f*\U0008fcda£iÄo6l\U00097750z', alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='"', E right='\x9f*\U0008fcda£iÄo6l\U00097750z', E ) E Unreliable test timings! On an initial run, this test took 492.62ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.41 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg21] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 86 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 86 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
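The [gwN] prefixes show the suite running under pytest-xdist, and wall-clock deadlines are noisiest on loaded, shared builders like this one. Rather than patching each test, Hypothesis's settings-profile mechanism can relax timing checks suite-wide; a sketch for a conftest.py, where the profile name "ci" and the HYPOTHESIS_PROFILE environment variable are conventions from the Hypothesis docs, not something this package is known to use:

    import os

    from hypothesis import HealthCheck, settings

    # Register a CI profile once, then select it via an environment variable.
    settings.register_profile(
        "ci",
        deadline=None,
        suppress_health_check=[HealthCheck.too_slow],
    )
    settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))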
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 86 bytes, frozen) function = .run at 0xffffff8bbc1760> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 86 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '¨\x7fµ\U0005d767\x9b\U000b561bÓ\U000d53a0\x8a\x9d\U0006e447\x81' right = '`Á' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('¨\x7fµ\U0005d767\x9b\U000b561bÓ\U000d53a0\x8a\x9d\U0006e447\x81', '`Á', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 7797568.604905962, result = None finish = 7797569.098374057, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=493468) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 493.47ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 86 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 493.47ms, which exceeds the deadline of 200.00ms'), "args = ('¨\\x7fµ\\U0005d767\\x9b\\U0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. 
If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='¨\x7fµ\U0005d767\x9b\U000b561bÓ\U000d53a0\x8a\x9d\U0006e447\x81', right='`Á', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='¨\x7fµ\U0005d767\x9b\U000b561bÓ\U000d53a0\x8a\x9d\U0006e447\x81', E right='`Á', E ) E Unreliable test timings! On an initial run, this test took 493.47ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.99 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg11] _______________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 16 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 16 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) function = .run at 0xffffff8d8f59e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\x9e£', right = '\x93' alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x9e£', '\x93', Sorensen({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797574.821511996, result = None finish = 7797575.203118392, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=381606) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 381.61ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 16 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 381.61ms, which exceeds the deadline of 200.00ms'), "args = ('\\x9e£', '\\x93', Sorensen(...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\x9e£', right='\x93', alg=Sorensen({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Sorensen({'qval': 1, 'as_set': False, 'external': True}), E left='\x9e£', E right='\x93', E ) E Unreliable test timings! On an initial run, this test took 381.61ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.50 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg23] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'S', 'A', 'X', 'U', 'V', 'P', 'O', 'C', 'F', 'B', 'R', 'Q', 'Z', 'T', 'D', 'Y', 'E', 'J', 'L', 'N', 'G', 'I', 'M', 'K'})}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('𰧖', '𒋣', Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': ... 'A', 'X', 'U', 'V', 'P', 'O', 'C', 'F', 'B', 'R', 'Q', 'Z', 'T', 'D', 'Y', 'E', 'J', 'L', 'N', 'G', 'I', 'M', 'K'})})) kwargs = {}, initial_draws = 2, start = 7797607.994505844, result = None finish = 7797608.36231984, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=367814) current_deadline = timedelta(milliseconds=200) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 367.81ms, which exceeds the deadline of 200.00ms E Falsifying example: test_normalization_by_one( E alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'S', 'A', 'X', 'U', 'V', 'P', 'O', 'C', 'F', 'B', 'R', 'Q', 'Z', 'T', 'D', 'Y', 'E', 'J', 'L', 'N', 'G', 'I', 'M', 'K'})}), E left='𰧖', E right='𒋣', E ) /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded ________________________ test_normalization_same[alg22] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 6 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. 
________________________ test_normalization_same[alg22] ________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 6 bytes, frozen)

>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 6 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 6 bytes, frozen)
function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffff7d191620>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 6 bytes, frozen)

>                       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

text = '\U000e57a4'
alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U000e57a4', SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}))
kwargs = {}, initial_draws = 1, start = 7797590.009012635, result = None
finish = 7797590.294081332, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=285069)
current_deadline = datetime.timedelta(microseconds=250000)

>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 285.07ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 285.07ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000e57a4', SmithWaterman...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U000e57a4', alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_same(
E               alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}),
E               text='\U000e57a4',
E           )
E           Unreliable test timings! On an initial run, this test took 285.07ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.85 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) function = .run at 0xffffff8bbc2480> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '0', right = '' alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '', SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 7797598.402617146, result = None finish = 7797598.736660042, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=334043) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 334.04ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 334.04ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', SmithWaterman({'qva...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='0', right='', alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='0', E right='', E ) E Unreliable test timings! On an initial run, this test took 334.04ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.28 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_qval[None-Jaro] _____________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 55 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 55 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 55 bytes, frozen) function = .run at 0xffffff7f9316c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 55 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000c021e;\x14.Ñ\U000d77c5\U00091f92', right = '¨}', alg = 'Jaro' qval = None @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000c021e;\x14.Ñ\U000d77c5\U00091f92', '¨}', 'Jaro', None) kwargs = {}, initial_draws = 2, start = 7797618.361202234, result = None finish = 7797618.685697231, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=324495) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 324.50ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'Jaro', qval = None @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 55 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 324.50ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000c021e;\\x14.Ñ\\U000d7...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_qval(left='\U000c021e;\x14.Ñ\U000d77c5\U00091f92', right='¨}', alg='Jaro', qval=None) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_qval( E alg='Jaro', E qval=None, E left='\U000c021e;\x14.Ñ\U000d77c5\U00091f92', E right='¨}', E ) E Unreliable test timings! On an initial run, this test took 324.50ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.81 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_qval[None-JaroWinkler] __________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 alg = 'JaroWinkler', qval = None @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.24 seconds (0 invalid ones and 2 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test_external.py:51: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(180206713685535390628625724711745546240) to this test or run pytest with --hypothesis-seed=180206713685535390628625724711745546240 to reproduce this failure. ___________________________ test_is_normalized[alg1] ___________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.12 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test_compression/test_common.py:48: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(270117704997601168381778504176185195633) to this test or run pytest with --hypothesis-seed=270117704997601168381778504176185195633 to reproduce this failure. ___________________________ test_is_normalized[alg2] ___________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 alg = BZ2NCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test_compression/test_common.py:48: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(215025899991178067602526567947052123264) to this test or run pytest with --hypothesis-seed=215025899991178067602526567947052123264 to reproduce this failure. 
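The FailedHealthCheck blocks name their own two remedies: add HealthCheck.too_slow to suppress_health_check, and reproduce a specific run with the printed @seed value. A sketch combining both, using one of the seeds from the log; the test body is a hypothetical stand-in for the packaged test:

    # Sketch: suppress only the too_slow health check and pin the logged seed.
    import hypothesis
    import hypothesis.strategies
    from hypothesis import HealthCheck, seed, settings

    @seed(270117704997601168381778504176185195633)  # seed printed by the log above
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
    def test_is_normalized(left, right):
        pass  # real assertions live in tests/test_compression/test_common.py

Equivalently, the seed can be supplied once for the whole run with pytest --hypothesis-seed=..., exactly as the log suggests.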
______________________________ test_qval[1-Jaro] _______________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

alg = 'Jaro', qval = 1

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.09 seconds (0 invalid ones and 3 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_external.py:51: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(233367923307087491054300320157198719493) to this test or run pytest with --hypothesis-seed=233367923307087491054300320157198719493 to reproduce this failure.
__________________________ test_list_of_numbers[Jaro] __________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = [], right = [69487179716126846, -29389, -98, 11124], alg = 'Jaro'

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:91:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ([], [69487179716126846, -29389, -98, 11124], 'Jaro'), kwargs = {}
initial_draws = 2, start = 7797616.779859951, result = None
finish = 7797617.287896546, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=508037)
current_deadline = datetime.timedelta(microseconds=250000)

>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 508.04ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = 'Jaro'

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:91:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_list_of_numbers(left=[], right=[69487179716126846, -29389, -98, 11124], alg='Jaro') produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_list_of_numbers(
E           alg='Jaro',
E           left=[],
E           right=[69487179716126846, -29389, -98, 11124],
E       )
E       Unreliable test timings! On an initial run, this test took 508.04ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.71 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________________ test_simmetry[alg0] ______________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '', ArithNCD({'base': 2, 'terminator': None, 'qval': 1}))
kwargs = {}, initial_draws = 2, start = 7797656.403373131, result = None
finish = 7797656.898758426, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=495385)
current_deadline = timedelta(milliseconds=200)

>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 495.39ms, which exceeds the deadline of 200.00ms
E                   Falsifying example: test_simmetry(
E                       alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}),
E                       left='',
E                       right='',
E                   )

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
_____________________________ test_simmetry[alg1] ______________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = '', alg = BWTRLENCD({'terminator': '\x00'})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('0', '', BWTRLENCD({'terminator': '\x00'})), kwargs = {}
initial_draws = 2, start = 7797661.851701073, result = None
finish = 7797662.513402866, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=661702)
current_deadline = datetime.timedelta(microseconds=250000)

>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 661.70ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = BWTRLENCD({'terminator': '\x00'})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_simmetry(left='0', right='', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_simmetry(
E           alg=BWTRLENCD({'terminator': '\x00'}),
E           left='0',
E           right='',
E       )
E       Unreliable test timings! On an initial run, this test took 661.70ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.65 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
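Every Flaky report above ends with the same advice: relax or drop the 200ms per-example deadline. A minimal sketch, assuming an illustrative test function (settings.deadline accepts None, a number of milliseconds, or a datetime.timedelta):

    from datetime import timedelta
    from hypothesis import HealthCheck, given, settings, strategies as st

    # Either disable the deadline outright, as the report recommends...
    @settings(deadline=None)
    @given(left=st.text(), right=st.text())
    def test_without_deadline(left, right):
        assert len(left + right) == len(left) + len(right)

    # ...or raise it to a value that tolerates slow first runs.
    @settings(deadline=timedelta(seconds=1))
    @given(left=st.text(), right=st.text())
    def test_with_generous_deadline(left, right):
        assert len(left + right) == len(left) + len(right)

For a whole test run rather than a single test, the same settings can be applied through a registered profile; an illustrative conftest.py, not taken from this package:

    # conftest.py (illustrative; this build's actual conftest is not shown in the log)
    from hypothesis import HealthCheck, settings

    # Register a CI-oriented profile: no per-example deadline, and the
    # "data generation too slow" health check suppressed.
    settings.register_profile(
        "rpmbuild",
        deadline=None,
        suppress_health_check=[HealthCheck.too_slow],
    )
    settings.load_profile("rpmbuild")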
___________________________ test_is_normalized[alg3] ___________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '\U00013e5b\x94', alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\U00013e5b\x94', RLENCD({'qval': 1})), kwargs = {}
initial_draws = 2, start = 7797676.078112622, result = None
finish = 7797676.489282518, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=411170)
current_deadline = datetime.timedelta(microseconds=250000)

>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 411.17ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='\U00013e5b\x94', alg=RLENCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_is_normalized(
E           alg=RLENCD({'qval': 1}),
E           left='',
E           right='\U00013e5b\x94',
E       )
E       Unreliable test timings! On an initial run, this test took 411.17ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.94 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
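For context, the two failing compression-suite properties check that textdistance's NCD-style algorithms are symmetric (test_simmetry) and that their normalized form stays within [0, 1] (test_is_normalized). A plausible reconstruction, not the suite's actual code (the alg(left, right) call convention and the normalized_distance helper are textdistance API, but the exact assertions are assumed from the test names):

    import textdistance

    # RLENCD with qval=1 matches the parametrization shown in the log.
    alg = textdistance.RLENCD(qval=1)
    left, right = 'test', 'text'

    # Symmetry: swapping the arguments should not change the distance.
    assert alg(left, right) == alg(right, left)

    # Normalization: the normalized distance should land in [0, 1].
    assert 0.0 <= alg.normalized_distance(left, right) <= 1.0

Note that the failures above were triggered by the deadline, not by these properties being violated; the Flaky re-run confirmed the assertions themselves pass.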
___________________________ test_qval[1-JaroWinkler] ___________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

left = '\x7fH', right = '6\x99á+﮻', alg = 'JaroWinkler', qval = 1

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\x7fH', '6\x99á+﮻', 'JaroWinkler', 1), kwargs = {}, initial_draws = 2
start = 7797676.687750216, result = None, finish = 7797677.088630411
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=400880)
current_deadline = datetime.timedelta(microseconds=250000)

>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 400.88ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = 'JaroWinkler', qval = 1

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_qval(left='\x7fH', right='6\x99á+﮻', alg='JaroWinkler', qval=1) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_qval(
E           alg='JaroWinkler',
E           qval=1,
E           left='\x7fH',
E           right='6\x99á+﮻',
E       )
E       Unreliable test timings! On an initial run, this test took 400.88ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.19 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
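The frames above consistently show current_deadline = datetime.timedelta(microseconds=250000) even though the configured deadline is 200ms. That is the grace margin visible in the wrapper source quoted earlier in this log: on non-final runs Hypothesis compares against (deadline // 4) * 5, i.e. 1.25x the configured value, and the Flaky errors arise when the final replay then comes in under the deadline that the first run exceeded. A small worked check of that arithmetic:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)   # settings.deadline
    current_deadline = (deadline // 4) * 5            # slack applied when not is_final
    assert current_deadline == datetime.timedelta(microseconds=250000)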
                    args = self.stuff.args
                    kwargs = dict(self.stuff.kwargs)
                    ...
                    report(printer.getvalue())
                    return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 6 bytes, frozen)
function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffff7d1b6c00>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 6 bytes, frozen)

    def run(data):
        # Set up dynamic context needed by a single test run.
        ...
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
left = '0', right = '', alg = ZLIBNCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('0', '', ZLIBNCD({})), kwargs = {}, initial_draws = 2
start = 7797699.397852475, result = None, finish = 7797699.904230169
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=506378)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 506.38ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = ZLIBNCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 506.38ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', ZLIBNCD({})), kwarg...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent an
        ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """
        ...
        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='0', right='', alg=ZLIBNCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_is_normalized(
E               alg=ZLIBNCD({}),
E               left='0',
E               right='',
E           )
E           Unreliable test timings! On an initial run, this test took 506.38ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 4.31 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________________ test_simmetry[alg2] ______________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('', '', BZ2NCD({})), kwargs = {}, initial_draws = 2
start = 7797737.970478766, result = None, finish = 7797738.53137826
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=560899)
current_deadline = datetime.timedelta(microseconds=200000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 560.90ms, which exceeds the deadline of 200.00ms
E           Falsifying example: test_simmetry(
E               alg=BZ2NCD({}),
E               left='',
E               right='',
E           )

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
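Every failure in this run follows the same pattern: Hypothesis's default 200 ms per-example deadline trips on a slow first call, and the report itself suggests setting deadline=None. A minimal sketch of that fix, assuming a test shaped like the ones in tests/test_compression/test_common.py (the body and algorithm choice here are illustrative, not the package's actual code):

    import hypothesis
    import hypothesis.strategies as st
    import textdistance

    # The extra @hypothesis.settings decorator disables the per-example
    # deadline, so a slow first call can no longer raise DeadlineExceeded
    # (and therefore can no longer be escalated to a Flaky error).
    @hypothesis.settings(deadline=None)
    @hypothesis.given(left=st.text(), right=st.text())
    def test_is_normalized(left, right):
        # Illustrative body: a normalized distance must stay within [0, 1].
        assert 0 <= textdistance.zlib_ncd.normalized_distance(left, right) <= 1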
______________________________ test_qval[2-Jaro] _______________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

left = '\U000d1f836ͤ«\U000abb85', right = '\U0005b4f3ͤ«\U000abb85'
alg = 'Jaro', qval = 2

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('\U000d1f836ͤ«\U000abb85', '\U0005b4f3ͤ«\U000abb85', 'Jaro', 2)
kwargs = {}, initial_draws = 2, start = 7797725.000718703, result = None
finish = 7797725.489372598, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=488654)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 488.65ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = 'Jaro', qval = 2

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(VALID, 62 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 488.65ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000d1f836ͤ«\\U000abb85'...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(...):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_qval(left='\U000d1f836ͤ«\U000abb85', right='\U0005b4f3ͤ«\U000abb85', alg='Jaro', qval=2) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_qval(
E           alg='Jaro',
E           qval=2,
E           left='\U000d1f836ͤ«\U000abb85',
E           right='\U0005b4f3ͤ«\U000abb85',
E       )
E       Unreliable test timings! On an initial run, this test took 488.65ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.22 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
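The two different current_deadline values in these locals are not a typo: the wrapper shown in each traceback grants non-final runs 25% slack via (current_deadline // 4) * 5, so the 200 ms setting is enforced as 250 ms while generating and shrinking, and only as 200 ms on the final reproduction. A quick standalone check of that arithmetic (not Hypothesis code, just Python's timedelta):

    import datetime

    deadline = datetime.timedelta(milliseconds=200)  # Hypothesis default
    # Non-final runs get 25% slack, exactly the value in the locals above:
    print((deadline // 4) * 5)  # 0:00:00.250000 == timedelta(microseconds=250000)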
_____________________________ test_simmetry[alg3] ______________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = '', alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('0', '', RLENCD({'qval': 1})), kwargs = {}, initial_draws = 2
start = 7797743.988430802, result = None, finish = 7797744.288359699
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=299929)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 299.93ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 299.93ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', RLENCD({'qval': 1})...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(...):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_simmetry(left='0', right='', alg=RLENCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_simmetry(
E           alg=RLENCD({'qval': 1}),
E           left='0',
E           right='',
E       )
E       Unreliable test timings! On an initial run, this test took 299.93ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.63 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
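Patching every affected test individually would be tedious; a build like this one would more plausibly disable deadlines suite-wide. One common approach, sketched here with a hypothetical conftest.py (the profile name and placement are assumptions, not taken from textdistance's actual test setup), is to register a Hypothesis settings profile and select it from the environment:

    # conftest.py (hypothetical) -- applies to the whole test session.
    import os
    import hypothesis

    # No deadline, and no example database inside a throwaway build chroot.
    hypothesis.settings.register_profile("rpmbuild", deadline=None, database=None)
    hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "rpmbuild"))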
___________________________ test_is_normalized[alg5] ___________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

left = '\U00010e2c3', right = '\U00060995', alg = SqrtNCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('\U00010e2c3', '\U00060995', SqrtNCD({'qval': 1})), kwargs = {}
initial_draws = 2, start = 7797732.392703125, result = None
finish = 7797732.916501319, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=523798)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 523.80ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = SqrtNCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(VALID, 18 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 523.80ms, which exceeds the deadline of 200.00ms'), "args = ('\\U00010e2c3', '\\U00060995...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(...):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='\U00010e2c3', right='\U00060995', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_is_normalized(
E           alg=SqrtNCD({'qval': 1}),
E           left='\U00010e2c3',
E           right='\U00060995',
E       )
E       Unreliable test timings! On an initial run, this test took 523.80ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.78 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
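Disabling the deadline outright is not the only option. Every report here shows a first call of roughly 0.3-0.56 s and a retry in single-digit milliseconds, so raising the deadline above the observed worst case would also quiet these failures while keeping some protection against real slowdowns. A sketch (the one-second value is an assumption, not a recommendation from the log):

    import datetime
    import hypothesis
    import hypothesis.strategies as st

    # Keep a deadline as a safety net, but leave headroom for one-time
    # warm-up costs such as importing or initializing a compressor.
    @hypothesis.settings(deadline=datetime.timedelta(seconds=1))
    @hypothesis.given(text=st.text(min_size=1))
    def test_idempotency_compressor(text):
        ...  # illustrative placement; the real body lives in test_sqrt_ncd.py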
_________________________ test_idempotency_compressor __________________________
[gw7] linux -- Python 3.12.0 /usr/bin/python3

text = '\U00050a06\U00050a06'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_idempotency_compressor(text):

tests/test_compression/test_sqrt_ncd.py:31:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('\U00050a06\U00050a06',), kwargs = {}, initial_draws = 1
start = 7797732.395448625, result = None, finish = 7797732.88804812
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=492599)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 492.60ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_idempotency_compressor(text):

tests/test_compression/test_sqrt_ncd.py:31:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(VALID, 12 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 492.60ms, which exceeds the deadline of 200.00ms'), "args = ('\\U00050a06\\U00050a06',), ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(...):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_idempotency_compressor(text='\U00050a06\U00050a06') produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_idempotency_compressor(
E           text='\U00050a06\U00050a06',
E       )
E       Unreliable test timings! On an initial run, this test took 492.60ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.42 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
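The consistent shape of these reports (slow first call, fast repeat) points at one-time warm-up rather than genuinely slow algorithms, and that can be checked outside Hypothesis entirely. A rough timing sketch, assuming textdistance's module-level zlib_ncd instance and reusing the falsifying inputs reported above:

    import time
    import textdistance

    for attempt in (1, 2):
        start = time.perf_counter()
        textdistance.zlib_ncd('0', '')  # inputs from the falsifying example
        elapsed_ms = (time.perf_counter() - start) * 1000
        print(f"call {attempt}: {elapsed_ms:.2f} ms")
    # A dominant first call would mean the deadline failures are warm-up noise.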
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 14 bytes, frozen) function = .run at 0xffffff8bbab420> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 14 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '𐳿0', right = '1' alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'S', 'T', 'B', 'Q', 'F', 'K', 'C', 'Y', 'V', 'L', 'Z', 'U', 'D', 'I', 'O', 'A', 'N', 'M', 'J', 'E', 'P', 'X', 'R', 'G'})}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('𐳿0', '1', Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped':... 
'T', 'B', 'Q', 'F', 'K', 'C', 'Y', 'V', 'L', 'Z', 'U', 'D', 'I', 'O', 'A', 'N', 'M', 'J', 'E', 'P', 'X', 'R', 'G'})})) kwargs = {}, initial_draws = 2, start = 7797741.774196125, result = None finish = 7797742.32511542, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=550919) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 550.92ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'S', 'T', 'B', 'Q', 'F', 'K', 'C', 'Y', 'V', 'L', 'Z', 'U', 'D', 'I', 'O', 'A', 'N', 'M', 'J', 'E', 'P', 'X', 'R', 'G'})}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 14 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 550.92ms, which exceeds the deadline of 200.00ms'), "args = ('𐳿0', '1', Editex({'match_co...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. 
with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='𐳿0', right='1', alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'S', 'T', 'B', 'Q', 'F', 'K', 'C', 'Y', 'V', 'L', 'Z', 'U', 'D', 'I', 'O', 'A', 'N', 'M', 'J', 'E', 'P', 'X', 'R', 'G'})})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'S', 'T', 'B', 'Q', 'F', 'K', 'C', 'Y', 'V', 'L', 'Z', 'U', 'D', 'I', 'O', 'A', 'N', 'M', 'J', 'E', 'P', 'X', 'R', 'G'})}), E left='𐳿0', E right='1', E ) E Unreliable test timings! On an initial run, this test took 550.92ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.61 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
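Every Flaky failure in this log follows the same mechanism: while Hypothesis is still generating and shrinking examples it enforces a relaxed deadline of (current_deadline // 4) * 5, i.e. 125% of the configured value (the 250ms visible above against the 200ms setting), but the final replay of the falsifying example enforces the strict deadline. A test that is slow only on its first execution, for example because of cold imports or a heavily loaded builder, exceeds the deadline once, passes on replay, and is therefore reported as Flaky rather than as a plain DeadlineExceeded. Below is a minimal sketch of the opt-out that the error message itself recommends; the test body is illustrative, standing in for the real property in tests/test_common.py:

    import datetime
    import textdistance
    from hypothesis import given, settings, strategies as st

    # The relaxed generation-time deadline, matching the
    # current_deadline = datetime.timedelta(microseconds=250000) seen above:
    assert (datetime.timedelta(milliseconds=200) // 4) * 5 == datetime.timedelta(milliseconds=250)

    @settings(deadline=None)  # lift the per-example deadline entirely
    @given(left=st.text(), right=st.text())
    def test_normalization_range(left, right):
        # Normalized distances must stay within [0, 1].
        d = textdistance.editex.normalized_distance(left, right)
        assert 0 <= d <= 1

Setting deadline=None trades timing enforcement for stability, which is usually the right call on shared, oversubscribed build hardware such as this mock chroot.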
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg4] ______________________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 6 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 6 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) function = .run at 0xffffff758b9620> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '0', right = '', alg = ZLIBNCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '', ZLIBNCD({})), kwargs = {}, initial_draws = 2 start = 7797760.795430924, result = None, finish = 7797761.16377432 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=368343) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 368.34ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ZLIBNCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 6 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 368.34ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', ZLIBNCD({})), kwarg...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='0', right='', alg=ZLIBNCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=ZLIBNCD({}), E left='0', E right='', E ) E Unreliable test timings! 
On an initial run, this test took 368.34ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.75 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg6] ___________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 6 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 6 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) function = .run at 0xffffff7d65ec00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '0', right = '', alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '', EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})), kwargs = {} initial_draws = 2, start = 7797764.193898188, result = None finish = 7797764.694153982, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=500256) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 500.26ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 6 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 500.26ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', EntropyNCD({'qval':...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='0', right='', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_is_normalized( E alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}), E left='0', E right='', E ) E Unreliable test timings! 
On an initial run, this test took 500.26ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.91 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_compare_with_tversky_as_set _______________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 112 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 112 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 112 bytes, frozen) function = .run at 0xffffff8dd536a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 112 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000ac1f0\U0001cca3I\U000b1bb1àÜ\x9c\t' right = 'À\U000368b8🌤Ú\U000734ac𡎫«Ù' @hypothesis.given( > left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_token/test_jaccard.py:39: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000ac1f0\U0001cca3I\U000b1bb1àÜ\x9c\t', 'À\U000368b8🌤Ú\U000734ac𡎫«Ù') kwargs = {}, initial_draws = 2, start = 7797772.010783205, result = None finish = 7797772.374803601, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=364020) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 364.02ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given( > left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_token/test_jaccard.py:39: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 112 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 364.02ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000ac1f0\\U0001cca3I\\U0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_compare_with_tversky_as_set(left='\U000ac1f0\U0001cca3I\U000b1bb1àÜ\x9c\t', right='À\U000368b8🌤Ú\U000734ac𡎫«Ù') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_compare_with_tversky_as_set( E left='\U000ac1f0\U0001cca3I\U000b1bb1àÜ\x9c\t', E right='À\U000368b8🌤Ú\U000734ac𡎫«Ù', E ) E Unreliable test timings! On an initial run, this test took 364.02ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.08 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_normalized_by_one[alg6] _________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 66 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 66 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 66 bytes, frozen) function = .run at 0xffffff8bccb1a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 66 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args
kwargs = dict(self.stuff.kwargs)
[repeated Hypothesis-internal frames elided: run() / execute_once(), /usr/lib/python3.12/site-packages/hypothesis/core.py:785/789/850; the identical source dump recurs in every failure below]
>           return test(*args, **kwargs)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
left = '𗋑ô𭀊𗋑æ', right = '\x16\U0004aade\x93'
alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})
tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
runtime = datetime.timedelta(microseconds=419450)
current_deadline = datetime.timedelta(microseconds=250000)
>                       raise DeadlineExceeded(runtime, self.settings.deadline)
E                       hypothesis.errors.DeadlineExceeded: Test took 419.45ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='𗋑ô𭀊𗋑æ', right='\x16\U0004aade\x93', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalized_by_one(
E       alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}),
E       left='𗋑ô𭀊𗋑æ',
E       right='\x16\U0004aade\x93',
E   )
E   Unreliable test timings! On an initial run, this test took 419.45ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.45 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
__________________________ test_compare_with_tversky ___________________________
[gw6] linux -- Python 3.12.0 /usr/bin/python3
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test_token/test_sorensen.py:27: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(151175494012349191863678083007592213591) to this test or run pytest with --hypothesis-seed=151175494012349191863678083007592213591 to reproduce this failure.
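The two failures above name the relevant knobs explicitly: deadline and suppress_health_check. A minimal sketch of what applying that advice would look like (the decorator placement and test body are illustrative stand-ins, not textdistance's actual test code):

    # Sketch only: applies the advice Hypothesis printed above.
    import hypothesis
    from hypothesis import HealthCheck, settings, strategies

    @settings(
        deadline=None,                                # no per-example time limit
        suppress_health_check=[HealthCheck.too_slow],  # allow slow data generation
    )
    @hypothesis.given(left=strategies.text(), right=strategies.text())
    def test_compare_with_tversky(left, right):
        ...  # original assertions unchanged

Setting deadline=None only disables the DeadlineExceeded check; genuine assertion failures are still reported.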
___________________________ test_qval[2-JaroWinkler] ___________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3
[repeated Hypothesis-internal frames elided: _execute_once_for_engine -> execute_once -> run, core.py:850/789/785, as above]
left = '\U00046fc4\U00046fc4', right = '\U00046fc4\U00046fc4'
alg = 'JaroWinkler', qval = 2
tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
>                       raise DeadlineExceeded(runtime, self.settings.deadline)
E                       hypothesis.errors.DeadlineExceeded: Test took 490.98ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_qval(left='\U00046fc4\U00046fc4', right='\U00046fc4\U00046fc4', alg='JaroWinkler', qval=2) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_qval(
E       alg='JaroWinkler',
E       qval=2,
E       left='\U00046fc4\U00046fc4',
E       right='\U00046fc4\U00046fc4',
E   )
E   Unreliable test timings! On an initial run, this test took 490.98ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.71 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
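The @seed(...) hint printed after the health-check failure above is the reproduction hook: re-running with that seed replays the same example sequence. A sketch of both forms, with the seed value copied from this log and a stand-in test:

    # Pin the PRNG seed Hypothesis reported, to replay this exact run.
    from hypothesis import given, seed, strategies as st

    @seed(151175494012349191863678083007592213591)
    @given(left=st.text(), right=st.text())
    def test_compare_with_tversky(left, right):
        ...

    # Equivalent from the command line:
    #   pytest --hypothesis-seed=151175494012349191863678083007592213591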
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) function = .run at 0xffffff7d5b94e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '' alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '', ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) kwargs = {}, initial_draws = 2, start = 7797787.59853434, result = None finish = 7797788.016449035, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=417915) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 417.91ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 4 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 417.91ms, which exceeds the deadline of 200.00ms'), "args = ('', '', ArithNCD({'base': 2,...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}), E left='', E right='', E ) E Unreliable test timings! On an initial run, this test took 417.91ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg8] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 84 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 84 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
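A detail worth noticing in the locals of every deadline frame: settings.deadline is 200ms, yet current_deadline shows 250000 microseconds. That is the (current_deadline // 4) * 5 line in the wrapped test: on non-final runs Hypothesis grants a 5/4 grace factor and only enforces the configured deadline exactly on the final replay. A sketch of just that arithmetic:

    # The 200ms -> 250ms grace factor visible in the tracebacks above.
    import datetime

    deadline = datetime.timedelta(milliseconds=200)  # settings.deadline
    current_deadline = (deadline // 4) * 5           # applied when is_final=False
    assert current_deadline == datetime.timedelta(microseconds=250000)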
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 84 bytes, frozen) function = .run at 0xffffffa5675940> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 84 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U0009408f7\U00040902h\U00078e2a\U000dd233' right = '\U000f408fòo29\U00078e2a\U000dd233' alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0009408f7\U00040902h\U00078e2a\U000dd233', '\U000f408fòo29\U00078e2a\U000dd233', LCSStr({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797773.801620486, result = None finish = 7797774.188261982, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=386641) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 386.64ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 84 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 386.64ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0009408f7\\U00040902h\\U...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U0009408f7\U00040902h\U00078e2a\U000dd233', right='\U000f408fòo29\U00078e2a\U000dd233', alg=LCSStr({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=LCSStr({'qval': 1, 'external': True}), E left='\U0009408f7\U00040902h\U00078e2a\U000dd233', E right='\U000f408fòo29\U00078e2a\U000dd233', E ) E Unreliable test timings! On an initial run, this test took 386.64ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.98 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ---------------------------------- Hypothesis ---------------------------------- WARNING: Hypothesis has spent more than five minutes working to shrink a failing example, and stopped because it is making very slow progress. When you re-run your tests, shrinking will resume and may take this long before aborting again. PLEASE REPORT THIS if you can provide a reproducing example, so that we can improve shrinking performance for everyone. _____________________________ test_simmetry[alg5] ______________________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 56 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 56 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
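Every deadline failure in this run follows the same pattern: a slow first call (roughly 300-500ms against a 200ms limit) and a fast replay (under 6ms), which suggests a loaded builder rather than genuinely slow tests. For a build environment like this one, the usual fix is a suite-wide settings profile in conftest.py rather than per-test decorators; the profile and environment-variable names below are assumptions for illustration:

    # Sketch: conftest.py profile so the whole suite runs without deadlines.
    import os
    from hypothesis import HealthCheck, settings

    settings.register_profile(
        "rpmbuild",
        deadline=None,
        suppress_health_check=[HealthCheck.too_slow],
    )
    settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "rpmbuild"))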
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 56 bytes, frozen) function = .run at 0xffffff758ae340> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 56 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '質\U000bdc5d𥧵ws뒎î', right = '', alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('質\U000bdc5d𥧵ws뒎î', '', SqrtNCD({'qval': 1})), kwargs = {} initial_draws = 2, start = 7797796.07571665, result = None finish = 7797796.491618745, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=415902) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 415.90ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 56 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 415.90ms, which exceeds the deadline of 200.00ms'), "args = ('質\\U000bdc5d𥧵ws뒎î', '', Sqr...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='質\U000bdc5d𥧵ws뒎î', right='', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=SqrtNCD({'qval': 1}), E left='質\U000bdc5d𥧵ws뒎î', E right='', E ) E Unreliable test timings! 
On an initial run, this test took 415.90ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.02 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_compare_with_tversky_as_set _______________________ [gw6] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 85 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 85 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
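If disabling deadlines outright feels too blunt, a larger deadline also works; the deadline setting accepts an int in milliseconds or a timedelta. The 500ms figure below is an assumption for illustration, chosen to clear the 300-500ms first-run times seen in this log, and the test shown is a stand-in:

    # Sketch: loosen rather than disable the per-example deadline.
    from datetime import timedelta
    from hypothesis import given, settings, strategies as st

    @settings(deadline=timedelta(milliseconds=500))
    @given(left=st.text(), right=st.text())
    def test_simmetry(left, right):
        ...  # original parametrized body unchanged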
                    args = self.stuff.args
                    kwargs = dict(self.stuff.kwargs)
                    if example_kwargs is None:
                        a, kw, argslices = context.prep_args_kwargs_from_strategies(
                            (), self.stuff.given_kwargs
                        )
                        assert not a, "strategies all moved to kwargs by now"
                    else:
                        kw = example_kwargs
                        argslices = {}
                    kwargs.update(kw)
                    if expected_failure is not None:
                        nonlocal text_repr
                        text_repr = repr_call(test, args, kwargs)

                    if print_example or current_verbosity() >= Verbosity.verbose:
                        printer = RepresentationPrinter(context=context)
                        if print_example:
                            printer.text("Falsifying example:")
                        else:
                            printer.text("Trying example:")

                        if self.print_given_args:
                            printer.text(" ")
                            printer.repr_call(
                                test.__name__,
                                args,
                                kwargs,
                                force_split=True,
                                arg_slices=argslices,
                                leading_comment=(
                                    "# " + context.data.slice_comments[(0, 0)]
                                    if (0, 0) in context.data.slice_comments
                                    else None
                                ),
                            )
                        report(printer.getvalue())
                    return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
tests/test_token/test_sorensen.py:37: in test_compare_with_tversky_as_set
    @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: in test
    raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 309.89ms, which exceeds the deadline of 200.00ms

The above exception was the direct cause of the following exception:

tests/test_token/test_sorensen.py:37: in test_compare_with_tversky_as_set
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: in execute_once
    raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_compare_with_tversky_as_set(left='Õ\U000b889a\U0001ff3b\x16\U0006e8a6桨', right='ð\U0001239b\U0005dc4c4ö.\U0005c801') produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_compare_with_tversky_as_set(
E       left='Õ\U000b889a\U0001ff3b\x16\U0006e8a6桨',
E       right='ð\U0001239b\U0005dc4c4ö.\U0005c801',
E   )
E   Unreliable test timings! On an initial run, this test took 309.89ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 4.08 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
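Every failure in this run follows the same shape: the property itself holds, but the first execution of an example blows past Hypothesis's 200 ms per-example deadline, and when Hypothesis replays the same example to confirm the failure it finishes quickly, so the framework reports Flaky instead of a real counterexample. The fix the error message itself suggests is per-test. A minimal sketch, assuming a property test shaped like the ones in tests/test_token/test_sorensen.py (the assertion body below is an illustrative stand-in, not the real test):

    import textdistance
    from hypothesis import given, settings, strategies as st


    @given(left=st.text(), right=st.text())
    @settings(deadline=None)  # disable the per-example time limit for this test only
    def test_compare_with_tversky_as_set(left, right):
        # Stand-in property: Sorensen-Dice similarity is symmetric in its arguments.
        assert textdistance.sorensen(left, right) == textdistance.sorensen(right, left)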
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 116 bytes, frozen) function = .run at 0xffffff7fd05d00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 116 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U00040b15x§¶\x13XØ¡\U000c60f3', right = '*æ\U000cf65a#À5𧑼\x8fËB¤' alg = 'Jaro', qval = 3 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U00040b15x§¶\x13XØ¡\U000c60f3', '*æ\U000cf65a#À5𧑼\x8fËB¤', 'Jaro', 3) kwargs = {}, initial_draws = 2, start = 7797811.804417483, result = None finish = 7797812.294769778, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=490352) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 490.35ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'Jaro', qval = 3 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 116 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 490.35ms, which exceeds the deadline of 200.00ms'), "args = ('\\U00040b15x§¶\\x13XØ¡\\U00...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_qval(left='\U00040b15x§¶\x13XØ¡\U000c60f3', right='*æ\U000cf65a#À5𧑼\x8fËB¤', alg='Jaro', qval=3) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_qval( E alg='Jaro', E qval=3, E left='\U00040b15x§¶\x13XØ¡\U000c60f3', E right='*æ\U000cf65a#À5𧑼\x8fËB¤', E ) E Unreliable test timings! On an initial run, this test took 490.35ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.86 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_distributivity_compressor ________________________ [gw7] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 38 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 38 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
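Annotating dozens of property tests one by one does not scale for a package build, where the whole suite runs on loaded shared builders. Hypothesis supports settings profiles for exactly this case. A sketch of a conftest.py addition (the profile names and the environment variable are assumptions, not anything the textdistance suite currently defines):

    # conftest.py (hypothetical): pick a Hypothesis settings profile per environment.
    import os

    from hypothesis import settings

    # No deadline on shared/CI builders, where scheduling noise dominates runtimes.
    settings.register_profile("ci", deadline=None)
    # Keep the default 200 ms deadline for local development.
    settings.register_profile("dev", deadline=200)

    settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "dev"))

The pytest plugin bundled with Hypothesis can also select a registered profile from the command line via --hypothesis-profile, so the build could run the suite with the "ci" profile without patching individual tests.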
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 38 bytes, frozen) function = .run at 0xffffffa5bea340> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 38 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left1 = '1=1\U00060a03', left2 = '6', right = '0\U000ff909' @hypothesis.given( > left1=hypothesis.strategies.text(min_size=1), left2=hypothesis.strategies.text(min_size=1), right=hypothesis.strategies.text(min_size=1), ) tests/test_compression/test_sqrt_ncd.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('1=1\U00060a03', '6', '0\U000ff909'), kwargs = {}, initial_draws = 3 start = 7797804.803462157, result = None, finish = 7797805.288607552 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=485145) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 485.14ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given( > left1=hypothesis.strategies.text(min_size=1), left2=hypothesis.strategies.text(min_size=1), right=hypothesis.strategies.text(min_size=1), ) tests/test_compression/test_sqrt_ncd.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 38 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 485.14ms, which exceeds the deadline of 200.00ms'), "args = ('1=1\\U00060a03', '6', '0\\U...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_distributivity_compressor(left1='1=1\U00060a03', left2='6', right='0\U000ff909') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_distributivity_compressor( E left1='1=1\U00060a03', E left2='6', E right='0\U000ff909', E ) E Unreliable test timings! 
On an initial run, this test took 485.14ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.57 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_normalization_range ___________________________ [gw7] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 6 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 6 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
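A detail worth noting in these tracebacks: the locals show current_deadline = datetime.timedelta(microseconds=250000) even though the configured deadline is 200 ms. That is the `current_deadline = (current_deadline // 4) * 5` line visible in the wrapped test above at work: on non-final runs Hypothesis enforces 125% of the deadline, and only the final replay uses the strict value. A quick check of the arithmetic:

    from datetime import timedelta

    deadline = timedelta(milliseconds=200)
    # Non-final runs get a 25% grace margin: 200 ms // 4 * 5 = 250 ms,
    # matching the timedelta(microseconds=250000) seen in the locals above.
    assert (deadline // 4) * 5 == timedelta(microseconds=250000)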
___________________________ test_normalization_range ___________________________
[gw7] linux -- Python 3.12.0 /usr/bin/python3

/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
    result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
tests/test_compression/test_sqrt_ncd.py:59: in test_normalization_range
    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
    def test_normalization_range(text):
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: in test
    raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 523.55ms, which exceeds the deadline of 200.00ms

The above exception was the direct cause of the following exception:

tests/test_compression/test_sqrt_ncd.py:59: in test_normalization_range
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: in execute_once
    raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(text='Ā') produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       text='Ā',
E   )
E   Unreliable test timings! On an initial run, this test took 523.55ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.32 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_normalized_by_one[alg1] _________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
    result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
tests/test_compression/test_common.py:60: in test_normalized_by_one
    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: in test
    raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 303.43ms, which exceeds the deadline of 200.00ms

The above exception was the direct cause of the following exception:

tests/test_compression/test_common.py:60: in test_normalized_by_one
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: in execute_once
    raise Flaky(
        f"Hypothesis {text_repr} produces unreliable results: "
        "Falsified on the first call but did not on a subsequent one"
    ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='\U000fc62d\x1d\x81\x17ÏA', right='D\x14¹\x06\U00082ae6\x11\U000593c6E¨\U000ed9a2\U000734d1', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalized_by_one(
E       alg=BWTRLENCD({'terminator': '\x00'}),
E       left='\U000fc62d\x1d\x81\x17ÏA',
E       right='D\x14¹\x06\U00082ae6\x11\U000593c6E¨\U000ed9a2\U000734d1',
E   )
E   Unreliable test timings! On an initial run, this test took 303.43ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 196.73 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
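test_normalized_by_one is the marginal case: the replay took 196.73 ms, barely under the 200 ms limit, so even the "fast" path runs close to the deadline. Where disabling deadlines outright feels too blunt, a larger explicit deadline is a middle ground. A schematic sketch (the 500 ms figure and the simplified signature are assumptions; the real test is parametrized over ALGS in tests/test_compression/test_common.py):

    from datetime import timedelta

    from hypothesis import given, settings, strategies as st


    @given(left=st.text(), right=st.text())
    @settings(deadline=timedelta(milliseconds=500))  # assumed headroom, not a measured bound
    def test_normalized_by_one(left, right):
        ...  # body elided; see tests/test_compression/test_common.py:60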
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) function = .run at 0xffffff7615fce0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '\x7f', alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '\x7f', EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})), kwargs = {} initial_draws = 2, start = 7797843.862851343, result = None finish = 7797844.301644138, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=438793) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 438.79ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 6 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 438.79ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\x7f', EntropyNCD({'qv...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='\x7f', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}), E left='', E right='\x7f', E ) E Unreliable test timings! 
On an initial run, this test took 438.79ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.80 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_qval[3-JaroWinkler] ___________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 22 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 22 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
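Every one of these Flaky reports ends with the same hint from Hypothesis: consider turning deadlines off for this test by setting deadline=None. A minimal sketch of what that change looks like for the symmetry property, assuming a stand-in ALGS list and assertion body (the log does not show the actual test sources):

    import hypothesis
    import hypothesis.strategies as st
    import pytest
    import textdistance

    # Assumption: a one-element stand-in for the suite's parametrized ALGS list.
    ALGS = [textdistance.EntropyNCD(qval=1)]

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(deadline=None)  # timings vary too much on a loaded builder
    @hypothesis.given(left=st.text(), right=st.text())
    def test_simmetry(left, right, alg):
        # Symmetry property: the distance must not depend on argument order.
        assert alg.distance(left, right) == pytest.approx(alg.distance(right, left))

Disabling the deadline silences only the timing check; the property itself is still verified on every generated example.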
___________________________ test_qval[3-JaroWinkler] ___________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

data = ConjectureData(INTERESTING, 22 bytes, frozen)

[engine frames omitted -- identical to the dump above]

left = '\U00062fe5', right = '|\U000d9ccep', alg = 'JaroWinkler', qval = 3

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U00062fe5', '|\U000d9ccep', 'JaroWinkler', 3), kwargs = {}
initial_draws = 2, start = 7797842.20723196, result = None
finish = 7797842.548653757, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=341422)
current_deadline = datetime.timedelta(microseconds=250000)

        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 341.42ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

[final reproduction: data = ConjectureData(VALID, 22 bytes, frozen), is_final = True -- engine frames omitted as above]

E   hypothesis.errors.Flaky: Hypothesis test_qval(left='\U00062fe5', right='|\U000d9ccep', alg='JaroWinkler', qval=3) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_qval(
E       alg='JaroWinkler',
E       qval=3,
E       left='\U00062fe5',
E       right='|\U000d9ccep',
E   )
E   Unreliable test timings! On an initial run, this test took 341.42ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.70 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
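Rather than editing every test, the same effect can be had build-wide with a Hypothesis settings profile. A sketch of a conftest.py for the chroot, where CPU contention makes per-example deadlines unreliable (the profile name 'rpmbuild' is an assumption):

    # conftest.py (sketch)
    from hypothesis import settings

    # No per-example deadline inside the mock/koji build environment.
    settings.register_profile('rpmbuild', deadline=None)
    settings.load_profile('rpmbuild')

Hypothesis' pytest plugin can also select a registered profile per run with --hypothesis-profile=rpmbuild, leaving deadlines enforced everywhere else.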
_________________________ test_monotonicity_compressor _________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

data = ConjectureData(INTERESTING, 21 bytes, frozen)

[engine frames omitted -- identical to the dump above]

left = '\U000e41dda', right = '\U00014c36'

    @hypothesis.given(
>       left=hypothesis.strategies.text(min_size=1),
        right=hypothesis.strategies.characters(),
    )

tests/test_compression/test_entropy_ncd.py:39:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U000e41dda', '\U00014c36'), kwargs = {}, initial_draws = 2
start = 7797847.849967501, result = None, finish = 7797848.294958796
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=444991)
current_deadline = datetime.timedelta(microseconds=250000)

        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 444.99ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

[final reproduction: data = ConjectureData(VALID, 21 bytes, frozen), is_final = True -- engine frames omitted as above]

E   hypothesis.errors.Flaky: Hypothesis test_monotonicity_compressor(left='\U000e41dda', right='\U00014c36') produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_monotonicity_compressor(
E       left='\U000e41dda',
E       right='\U00014c36',
E   )
E   Unreliable test timings! On an initial run, this test took 444.99ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.38 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
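The locals shown in each DeadlineExceeded frame explain the 250000-microsecond figure: per the execute_once() wrapper quoted in the tracebacks, non-final runs compare the runtime against (deadline // 4) * 5, a 25% grace on the configured 200ms deadline, and only the final reproduction run enforces the raw deadline. A worked check of that arithmetic:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)    # settings.deadline
    current_deadline = (deadline // 4) * 5             # non-final runs get 1.25x grace
    print(current_deadline)                            # 0:00:00.250000

    runtime = datetime.timedelta(microseconds=444991)  # test_monotonicity_compressor above
    print(runtime >= current_deadline)                 # True -> DeadlineExceeded

The failures here go one step further: each reproduction run finished well under the deadline, so the DeadlineExceeded could not be reproduced at all, which Hypothesis reports as Flaky.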
_______________________ test_normalization_range[alg10] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

data = ConjectureData(INTERESTING, 28 bytes, frozen)

[engine frames omitted -- identical to the dump above]

left = '\U00015cf5Ø', right = 'èU\x90'
alg = Jaccard({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U00015cf5Ø', 'èU\x90', Jaccard({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 7797858.394149189, result = None
finish = 7797858.894977183, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=500828)
current_deadline = datetime.timedelta(microseconds=250000)

        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 500.83ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

[final reproduction: data = ConjectureData(VALID, 28 bytes, frozen), is_final = True -- engine frames omitted as above]

E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U00015cf5Ø', right='èU\x90', alg=Jaccard({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=Jaccard({'qval': 1, 'as_set': False, 'external': True}),
E       left='\U00015cf5Ø',
E       right='èU\x90',
E   )
E   Unreliable test timings! On an initial run, this test took 500.83ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.83 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 33 bytes, frozen) function = .run at 0xffffff8b614040> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 33 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left1 = '𱶅§', left2 = '[\U0004114b¤', right = '£' @hypothesis.given( > left1=hypothesis.strategies.text(min_size=1), left2=hypothesis.strategies.text(min_size=1), right=hypothesis.strategies.characters(), ) tests/test_compression/test_entropy_ncd.py:49: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('𱶅§', '[\U0004114b¤', '£'), kwargs = {}, initial_draws = 3 start = 7797871.567359149, result = None, finish = 7797871.900704346 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=333345) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 333.35ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given( > left1=hypothesis.strategies.text(min_size=1), left2=hypothesis.strategies.text(min_size=1), right=hypothesis.strategies.characters(), ) tests/test_compression/test_entropy_ncd.py:49: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 33 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 333.35ms, which exceeds the deadline of 200.00ms'), "args = ('𱶅§', '[\\U0004114b¤', '£'),...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_distributivity_compressor(left1='𱶅§', left2='[\U0004114b¤', right='£') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_distributivity_compressor( E left1='𱶅§', E left2='[\U0004114b¤', E right='£', E ) E Unreliable test timings! 
On an initial run, this test took 333.35ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.72 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg0] ___________________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 6 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 6 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
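A pattern worth noting before the last failure: every report pairs a first run of roughly 300-500 ms with a subsequent run under 10 ms (or just under the deadline), which is consistent with one-time warm-up cost or transient load on the builder rather than with the algorithms themselves. A sketch of how that could be checked outside pytest (the inputs are arbitrary; the constructor parameters echo the ArithNCD instance in the failure below):

    import timeit
    import textdistance

    alg = textdistance.ArithNCD(base=2, qval=1)  # assumption: mirrors alg0 below

    first = timeit.timeit(lambda: alg.distance('test', 'text'), number=1)
    rest = timeit.timeit(lambda: alg.distance('test', 'text'), number=100) / 100
    print(f'first call: {first * 1000:.2f} ms, steady state: {rest * 1000:.4f} ms')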
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) function = .run at 0xffffff7615ff60> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '0', right = '' alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '', ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) kwargs = {}, initial_draws = 2, start = 7797862.371613047, result = None finish = 7797862.704434843, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=332822) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 332.82ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 6 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 332.82ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', ArithNCD({'base': 2...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
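[editor's note] The current_deadline = timedelta(microseconds=250000) in the locals above comes from the (current_deadline // 4) * 5 line: on non-final runs Hypothesis widens the configured 200ms deadline by 25% before checking. A minimal sketch of that arithmetic, using the values from this very failure:

    from datetime import timedelta

    deadline = timedelta(milliseconds=200)    # settings.deadline in this log
    grace = (deadline // 4) * 5               # 250ms; timedelta // int is exact
    runtime = timedelta(microseconds=332822)  # the 332.82ms initial run above

    # Non-final runs only fail once they exceed the widened bound; the
    # real code raises hypothesis.errors.DeadlineExceeded at this point.
    assert runtime >= grace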
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='0', right='', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_is_normalized( E alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}), E left='0', E right='', E ) E Unreliable test timings! On an initial run, this test took 332.82ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 6.63 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_normalized_by_one[alg2] _________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 6 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 6 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) function = .run at 0xffffff7d140c20> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = 'Å', alg = BZ2NCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', 'Å', BZ2NCD({})), kwargs = {}, initial_draws = 2 start = 7797874.27448852, result = None, finish = 7797874.663363916 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=388875) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 388.88ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = BZ2NCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 6 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 388.88ms, which exceeds the deadline of 200.00ms'), "args = ('', 'Å', BZ2NCD({})), kwargs...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
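[editor's note] The frames above show the pattern used throughout tests/test_compression/test_common.py: pytest.mark.parametrize supplies the algorithm while hypothesis.given draws the strings, so each [algN] id is an independent Hypothesis run with its own shrinking. A self-contained sketch with stand-in algorithms (length and utf8 are illustrative, not textdistance's ALGS):

    import pytest
    import hypothesis
    from hypothesis import strategies as st

    def length(s: str) -> int:
        return len(s)

    def utf8(s: str) -> bytes:
        return s.encode("utf-8")

    ALGS = [length, utf8]  # both distribute over concatenation

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.given(left=st.text(), right=st.text())
    def test_distributes_over_concat(alg, left, right):
        # pytest provides alg; Hypothesis provides left and right
        assert alg(left + right) == alg(left) + alg(right)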
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='Å', alg=BZ2NCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=BZ2NCD({}), E left='', E right='Å', E ) E Unreliable test timings! 
On an initial run, this test took 388.88ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 30.84 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_normalization_range ___________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 11 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 11 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 11 bytes, frozen) function = .run at 0xffffff8bd58860> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 11 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\U00050980\x80' @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_normalization_range(text): tests/test_compression/test_entropy_ncd.py:62: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U00050980\x80',), kwargs = {}, initial_draws = 1 start = 7797891.751004635, result = None, finish = 7797892.095970431 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=344966) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 344.97ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_normalization_range(text): tests/test_compression/test_entropy_ncd.py:62: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 11 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 344.97ms, which exceeds the deadline of 200.00ms'), "args = ('\\U00050980\\x80',), kwargs...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
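[editor's note] What turns these DeadlineExceeded errors into Flaky is the final replay visible above: execute_once runs the shrunk example once more with expected_failure set, and if the exception does not recur the engine raises Flaky chained from it. A much-simplified sketch of that control flow, not Hypothesis's actual code:

    class Flaky(Exception):
        # stand-in for hypothesis.errors.Flaky
        pass

    def replay_final_example(test, kwargs, expected_failure):
        # simplified: the real version also re-times the run and prints
        # the "Unreliable test timings!" report seen in this log
        try:
            test(**kwargs)
        except Exception:
            return  # reproduced: the original failure stands
        raise Flaky(
            "Falsified on the first call but did not on a subsequent one"
        ) from expected_failure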
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(text='\U00050980\x80') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E text='\U00050980\x80', E ) E Unreliable test timings! On an initial run, this test took 344.97ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.35 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg11] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 16 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 16 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) function = .run at 0xffffffa6141620> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '¥\U0003ca61\\' alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '¥\U0003ca61\\', Sorensen({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 7797889.789971956, result = None finish = 7797890.29586265, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=505891) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 505.89ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 16 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 505.89ms, which exceeds the deadline of 200.00ms'), "args = ('', '¥\\U0003ca61\\\\', Sore...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
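[editor's note] Every failure in this log is a timing artifact of a loaded builder (505.89ms here versus 0.20ms on replay), not a property violation. A common remedy, sketched here as an assumption rather than anything the textdistance suite ships, is a Hypothesis settings profile selected in conftest.py:

    # conftest.py
    import os
    from hypothesis import settings

    settings.register_profile("ci", deadline=None)  # no deadline on build hosts
    settings.register_profile("dev", deadline=200)  # keep the 200ms default locally
    settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "dev"))

With this in place, a packager can export HYPOTHESIS_PROFILE=ci in the build environment and leave developer runs untouched.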
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right='¥\U0003ca61\\', alg=Sorensen({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Sorensen({'qval': 1, 'as_set': False, 'external': True}), E left='', E right='¥\U0003ca61\\', E ) E Unreliable test timings! On an initial run, this test took 505.89ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.20 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_normalized_by_one[alg3] _________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 62 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 62 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 62 bytes, frozen) function = .run at 0xffffff7d3e5a80> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 62 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '7', right = '\x06l\U000797b6\x90mÁ²o<ò\n\xa0', alg = RLENCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('7', '\x06l\U000797b6\x90mÁ²o<ò\n\xa0', RLENCD({'qval': 1})) kwargs = {}, initial_draws = 2, start = 7797910.260308039, result = None finish = 7797910.694024034, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=433716) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 433.72ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = RLENCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 62 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 433.72ms, which exceeds the deadline of 200.00ms'), "args = ('7', '\\x06l\\U000797b6\\x90...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
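[editor's note] The executors.py:47 frame that appears in every traceback is a pass-through hook: test_runner delegates straight to run(data). The pass-through is shown below as it appears in this log, followed by a hypothetical variant, which Hypothesis does not ship, purely to illustrate the hook's shape:

    import time

    def default_new_style_executor(data, function):
        # exactly what hypothesis/executors.py:47 does in this log
        return function(data)

    def timing_executor(data, function):
        # hypothetical: wrap each example to log its duration
        start = time.perf_counter()
        try:
            return function(data)
        finally:
            elapsed_ms = (time.perf_counter() - start) * 1000
            print(f"example finished in {elapsed_ms:.2f}ms")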
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='7', right='\x06l\U000797b6\x90mÁ²o<ò\n\xa0', alg=RLENCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=RLENCD({'qval': 1}), E left='7', E right='\x06l\U000797b6\x90mÁ²o<ò\n\xa0', E ) E Unreliable test timings! On an initial run, this test took 433.72ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.08 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_normalized_by_one[alg4] _________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 18 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 18 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 18 bytes, frozen) function = .run at 0xffffff7d2f7560> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 18 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '\U00084242ñ', alg = ZLIBNCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '\U00084242ñ', ZLIBNCD({})), kwargs = {}, initial_draws = 2 start = 7797939.813028825, result = None, finish = 7797940.096885422 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=283857) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 283.86ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ZLIBNCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 18 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 283.86ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\U00084242ñ', ZLIBNCD(...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='\U00084242ñ', alg=ZLIBNCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=ZLIBNCD({}), E left='', E right='\U00084242ñ', E ) E Unreliable test timings! 
_________________________ test_normalized_by_one[alg5] _________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

data = ConjectureData(INTERESTING, 6 bytes, frozen)

    def _execute_once_for_engine(self, data):
        ...
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 6 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(self, data, ...):
        ...
        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 6 bytes, frozen)

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 6 bytes, frozen)

    def run(data):
        ...
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '', right = '?', alg = SqrtNCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '?', SqrtNCD({'qval': 1})), kwargs = {}, initial_draws = 2
start = 7797968.211474424, result = None, finish = 7797968.495320721
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=283846)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 283.85ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = SqrtNCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 283.85ms, which exceeds the deadline of 200.00ms'), "args = ('', '?', SqrtNCD({'qval': 1}...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, ...):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='?', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalized_by_one(
E           alg=SqrtNCD({'qval': 1}),
E           left='',
E           right='?',
E       )
E       Unreliable test timings! On an initial run, this test took 283.85ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.95 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
=========================== short test summary info ============================
FAILED tests/test_common.py::test_normalization_by_one[alg0] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg12] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg0] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg1] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg0] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg13] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg12] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg1] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg13] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg14] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg13] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg2] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg3] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg2] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_same[alg15] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg16] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg14] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg4] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_range[alg15] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg16] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg5] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg5] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg16] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg17] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg7] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg18] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg18] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg6] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg8] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_range[alg17] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg7] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg7] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg18] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg19] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg19] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg11] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg21] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg21] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg22] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg21] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg11] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg23] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg22] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg22] - hypothesis.err...
FAILED tests/test_external.py::test_qval[None-Jaro] - hypothesis.errors.Flaky...
FAILED tests/test_external.py::test_qval[None-JaroWinkler] - hypothesis.error...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg1] - hypo...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg2] - hypo...
FAILED tests/test_external.py::test_qval[1-Jaro] - hypothesis.errors.FailedHe...
FAILED tests/test_external.py::test_list_of_numbers[Jaro] - hypothesis.errors...
FAILED tests/test_compression/test_common.py::test_simmetry[alg0] - hypothesi...
FAILED tests/test_compression/test_common.py::test_simmetry[alg1] - hypothesi...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg3] - hypo...
FAILED tests/test_external.py::test_qval[1-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg4] - hypo...
FAILED tests/test_compression/test_common.py::test_simmetry[alg2] - hypothesi...
FAILED tests/test_external.py::test_qval[2-Jaro] - hypothesis.errors.Flaky: H...
FAILED tests/test_compression/test_common.py::test_simmetry[alg3] - hypothesi...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg5] - hypo...
FAILED tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor
FAILED tests/test_common.py::test_normalization_range[alg23] - hypothesis.err...
FAILED tests/test_compression/test_common.py::test_simmetry[alg4] - hypothesi...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg6] - hypo...
FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set - h...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6] - ...
FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky - hypothe...
FAILED tests/test_external.py::test_qval[2-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg0] - ...
FAILED tests/test_common.py::test_normalization_range[alg8] - hypothesis.erro...
FAILED tests/test_compression/test_common.py::test_simmetry[alg5] - hypothesi...
FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set - ...
FAILED tests/test_external.py::test_qval[3-Jaro] - hypothesis.errors.Flaky: H...
FAILED tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor
FAILED tests/test_compression/test_sqrt_ncd.py::test_normalization_range - hy...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg1] - ...
FAILED tests/test_compression/test_common.py::test_simmetry[alg6] - hypothesi...
FAILED tests/test_external.py::test_qval[3-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor
FAILED tests/test_common.py::test_normalization_range[alg10] - hypothesis.err...
FAILED tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor
FAILED tests/test_compression/test_common.py::test_is_normalized[alg0] - hypo...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg2] - ...
FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range
FAILED tests/test_common.py::test_normalization_range[alg11] - hypothesis.err...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg3] - ...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg4] - ...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg5] - ...
================= 87 failed, 325 passed in 1296.68s (0:21:36) ==================

RPM build errors:
error: Bad exit status from /var/tmp/rpm-tmp.bOIX4x (%check)
    Bad exit status from /var/tmp/rpm-tmp.bOIX4x (%check)
Child return code was: 1
EXCEPTION: [Error('Command failed: \n # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec\n', 1)]
Traceback (most recent call last):
  File "/usr/lib/python3.11/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
             ^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mockbuild/util.py", line 597, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed:
 # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec
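The 87 failures summarized above largely come from Hypothesis's deadline and health-check machinery (Flaky, DeadlineExceeded, FailedHealthCheck) on a slow, contended builder rather than from assertion failures in textdistance itself. Instead of patching each test, a build-side option is to relax deadlines for the whole %check run. A sketch of one common approach, assuming a conftest.py the packager could add or patch (the profile name and environment-variable wiring are illustrative, not taken from this log):

```python
# conftest.py (illustrative)
import os

import hypothesis

# Koji/mock builders are slow and heavily contended, so wall-clock
# deadlines mostly measure the machine, not the code under test.
hypothesis.settings.register_profile("rpmbuild", deadline=None)

# Activate the relaxed profile only when the build environment asks for it.
if os.environ.get("HYPOTHESIS_PROFILE") == "rpmbuild":
    hypothesis.settings.load_profile("rpmbuild")
```

Since the suite is driven by pytest, the Hypothesis pytest plugin's `--hypothesis-profile=rpmbuild` option should also be able to select the same profile directly from the %check command line, without the environment variable.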