Mock Version: 5.0
Mock Version: 5.0
Mock Version: 5.0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2257044-54302/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1705536000
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.src.rpm
Child return code was: 0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2257044-54302/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1705449600
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.hVfwjS
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.v6rCRI
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
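
Note: the sed call in %prep above comments out every setup.py line that mentions isort, so the linter is dropped from the package's declared dependencies before dependency generation runs. A minimal Python equivalent of that substitution (the sample file contents are illustrative, not taken from this log):

    import re

    # Mirrors sed -r 's/^([[:blank:]]*)(.*\bisort\b)/\1# \2/':
    # keep the leading blanks, then insert "# " before the rest of the match.
    pattern = re.compile(r'^([ \t]*)(.*\bisort\b)', re.MULTILINE)
    src = "import isort\nsetup(name='textdistance')\n"
    print(pattern.sub(r'\1# \2', src))
    # -> the isort import becomes "# import isort"; the setup() line is untouched
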
Requirement not satisfied: setuptools >= 40.8
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement not satisfied: setuptools >= 40.8
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2257044-54302/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1705449600
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.MqmO28
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.iEPAyF
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
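
Note: "Child return code was: 11" above is not a failure. rpmbuild -br stops after %generate_buildrequires and exits 11 to signal that it wrote a fresh BuildRequires list (the .buildreqs.nosrc.rpm); mock then installs those packages and re-runs the stage, which is why %prep and the dependency pass repeat until nothing new is requested. A rough sketch of that retry loop, with an illustrative installer stub (not mock's actual API):

    import subprocess

    def install_generated_buildrequires() -> None:
        # Stub: mock resolves the BuildRequires recorded in the generated
        # *.buildreqs.nosrc.rpm with dnf; details omitted here.
        ...

    def generate_buildrequires(spec: str) -> None:
        while True:
            # -br runs %prep and %generate_buildrequires only
            rc = subprocess.call(['rpmbuild', '-br', '--noclean', spec])
            if rc != 11:  # 11 = "new BuildRequires written, install and retry"
                break
            install_generated_buildrequires()
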
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
creating textdistance.egg-info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
writing manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2257044-54302/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1705449600
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.9NmuM7
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.HsxuSm
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info creating textdistance.egg-info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt writing manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein' Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra 
== 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling 
pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'common' Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'common' Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'common' Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common' Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common' Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extra' Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extra' Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extra' Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra' Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra' Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extras' Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extras' Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extras' Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras' Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras' Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) 
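
The "Handling ... / Ignoring alien requirement ..." pairs in this stream are the generator matching each Requires-Dist marker from the wheel metadata against the extras requested with -x (in this pass: test, Jaro, JaroWinkler and Levenshtein). A requirement whose extra marker names an extra that was not requested is "alien" and skipped; a matching one is checked against the installed distributions. A minimal sketch of that filtering using the packaging and importlib.metadata APIs — the handle() helper and the hard-coded extras set are illustrative, not the actual pyproject_buildrequires.py code:

    from importlib.metadata import PackageNotFoundError, version
    from packaging.requirements import Requirement

    requested_extras = {'test', 'jaro', 'jarowinkler', 'levenshtein'}  # -x values, normalized

    def handle(req_string):
        req = Requirement(req_string)
        # A marker such as "extra == 'jaro'" only evaluates to True when the
        # environment it is evaluated against supplies a matching 'extra' key.
        if req.marker and not any(req.marker.evaluate({'extra': e})
                                  for e in requested_extras):
            print(f'Ignoring alien requirement: {req_string}')
            return
        try:
            installed = version(req.name)
        except PackageNotFoundError:
            print(f'Requirement not yet satisfied: {req_string}')  # becomes a BuildRequire
            return
        if req.specifier.contains(installed, prereleases=True):
            print(f'Requirement satisfied: {req_string} '
                  f'(installed: {req.name} {installed})')

    handle("rapidfuzz >=2.6.0 ; extra == 'jaro'")  # requested extra: checked, satisfied
    handle("tabulate ; extra == 'benchmark'")      # 'benchmark' not requested: alien
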
Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2) + cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 
-Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) 
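
The "running egg_info" / "running dist_info" output here is not rpmbuild itself but setuptools answering the standard PEP 517 hooks: the generator imports the build backend in-process, asks get_requires_for_build_wheel() for extra build requirements (['wheel'] in this log), then has prepare_metadata_for_build_wheel() write the textdistance-4.6.1.dist-info/ directory whose METADATA carries the Requires-Dist entries the generator then filters per extra. A bare-bones sketch of those two calls, assuming the default setuptools backend and the unpacked source tree as the working directory:

    import setuptools.build_meta as backend  # default backend when pyproject.toml names none

    # Extra requirements the backend wants before a wheel can be built.
    print(backend.get_requires_for_build_wheel())     # ['wheel'] in this environment

    # Runs dist_info and returns the name of the created .dist-info directory;
    # its METADATA file is where the Requires-Dist lines above come from.
    distinfo_dir = backend.prepare_metadata_for_build_wheel('.')
    print(distinfo_dir)                               # textdistance-4.6.1.dist-info
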
running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'dameraulevenshtein' (installed: jellyfish 0.9.0) Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' (installed: pyxDamerauLevenshtein 1.7.1) Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist 
(textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; 
extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'common' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'common' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'common' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'common' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'common' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extra' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extra' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extra' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extra' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extra' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extras' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extras' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extras' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extras' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extras' (installed: rapidfuzz 2.13.7) Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien 
requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2) + cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. 
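
Return code 11 from rpmbuild -br is not a failure: it is the defined signal that %generate_buildrequires emitted requirements that are not installed yet, so the build stopped after writing python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm. Mock then installs the missing BuildRequires and reruns rpmbuild, as the next command shows. The retry loop is, in spirit, something like the following sketch — the dnf builddep call stands in for mock's own package-manager plumbing, and the paths are taken from this log:

    import subprocess

    SPEC = '/builddir/build/SPECS/python-textdistance.spec'
    BUILDREQS = ('/builddir/build/SRPMS/'
                 'python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm')

    while True:
        rc = subprocess.call(['rpmbuild', '-br', '--noclean',
                              '--target', 'noarch', '--nodeps', SPEC])
        if rc != 11:   # 0: everything already satisfied; other codes: real failure
            break
        # 11: fresh dynamic BuildRequires were written into the nosrc package;
        # install them and regenerate until the requirement set converges.
        subprocess.check_call(['dnf', 'builddep', '-y', BUILDREQS])

    # With the build requirements in place, the full build proceeds:
    subprocess.check_call(['rpmbuild', '-ba', '--noprep', '--noclean',
                           '--target', 'noarch', '--nodeps', SPEC])
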
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2257044-54302/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1705449600 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.lGzXC8 + umask 022 + cd /builddir/build/BUILD + cd textdistance-4.6.1 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security 
-Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein' Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 
'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; 
extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'common' Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'common' Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'common' Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common' Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common' Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extra' Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extra' Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extra' Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra' Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra' Handling jellyfish ; extra == 'extras' from hook 
generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extras' Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extras' Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extras' Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras' Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras' Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2) + cat 
/builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x 
extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'dameraulevenshtein' (installed: jellyfish 0.9.0) Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' (installed: pyxDamerauLevenshtein 1.7.1) Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: 
Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; 
extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'common' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'common' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'common' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'common' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'common' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extra' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extra' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extra' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extra' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extra' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extras' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extras' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra 
== 'extras' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extras' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extras' (installed: rapidfuzz 2.13.7) Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2) + cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + 
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.HBKISh
+ umask 022
+ cd /builddir/build/BUILD
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ cd textdistance-4.6.1
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_wheel.py /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir
Processing /builddir/build/BUILD/textdistance-4.6.1
Preparing metadata (pyproject.toml): started
Running command Preparing metadata (pyproject.toml)
running dist_info
creating /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-b2623lhw/textdistance.egg-info
writing /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-b2623lhw/textdistance.egg-info/PKG-INFO
writing dependency_links to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-b2623lhw/textdistance.egg-info/dependency_links.txt
writing requirements to
/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-b2623lhw/textdistance.egg-info/requires.txt writing top-level names to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-b2623lhw/textdistance.egg-info/top_level.txt writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-b2623lhw/textdistance.egg-info/SOURCES.txt' reading manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-b2623lhw/textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-b2623lhw/textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-b2623lhw/textdistance-4.6.1.dist-info' Preparing metadata (pyproject.toml): finished with status 'done' Building wheels for collected packages: textdistance Building wheel for textdistance (pyproject.toml): started Running command Building wheel for textdistance (pyproject.toml) running bdist_wheel running build running build_py creating build creating build/lib creating build/lib/textdistance copying textdistance/__init__.py -> build/lib/textdistance copying textdistance/utils.py -> build/lib/textdistance copying textdistance/libraries.py -> build/lib/textdistance copying textdistance/benchmark.py -> build/lib/textdistance creating build/lib/textdistance/algorithms copying textdistance/algorithms/edit_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/simple.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/vector_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/sequence_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/__init__.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/compression_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/phonetic.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/base.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/types.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/token_based.py -> build/lib/textdistance/algorithms copying textdistance/libraries.json -> build/lib/textdistance installing to build/bdist.linux-riscv64/wheel running install running install_lib creating build/bdist.linux-riscv64 creating build/bdist.linux-riscv64/wheel creating build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/libraries.json -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/__init__.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/utils.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/libraries.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/benchmark.py -> build/bdist.linux-riscv64/wheel/textdistance creating build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/edit_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/simple.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/vector_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying 
build/lib/textdistance/algorithms/sequence_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/__init__.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/compression_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/phonetic.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/base.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/types.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/token_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms running install_egg_info running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Copying textdistance.egg-info to build/bdist.linux-riscv64/wheel/textdistance-4.6.1-py3.12.egg-info running install_scripts creating build/bdist.linux-riscv64/wheel/textdistance-4.6.1.dist-info/WHEEL creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-wheel-vcwtx4wl/.tmp-3080xm_n/textdistance-4.6.1-py3-none-any.whl' and adding 'build/bdist.linux-riscv64/wheel' to it adding 'textdistance/__init__.py' adding 'textdistance/benchmark.py' adding 'textdistance/libraries.json' adding 'textdistance/libraries.py' adding 'textdistance/utils.py' adding 'textdistance/algorithms/__init__.py' adding 'textdistance/algorithms/base.py' adding 'textdistance/algorithms/compression_based.py' adding 'textdistance/algorithms/edit_based.py' adding 'textdistance/algorithms/phonetic.py' adding 'textdistance/algorithms/sequence_based.py' adding 'textdistance/algorithms/simple.py' adding 'textdistance/algorithms/token_based.py' adding 'textdistance/algorithms/types.py' adding 'textdistance/algorithms/vector_based.py' adding 'textdistance-4.6.1.dist-info/LICENSE' adding 'textdistance-4.6.1.dist-info/METADATA' adding 'textdistance-4.6.1.dist-info/WHEEL' adding 'textdistance-4.6.1.dist-info/top_level.txt' adding 'textdistance-4.6.1.dist-info/RECORD' removing build/bdist.linux-riscv64/wheel Building wheel for textdistance (pyproject.toml): finished with status 'done' Created wheel for textdistance: filename=textdistance-4.6.1-py3-none-any.whl size=31077 sha256=f80ee194862773d745ecf44ddb7aada7e67b4b03aabbf98f0af8ad9245cc4574 Stored in directory: /builddir/.cache/pip/wheels/af/08/72/d6baf94a0831066222f63a4e4a469ea938a661b7e8974b7b68 Successfully built textdistance + RPM_EC=0 ++ jobs -p + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.Tdlu2u + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch '!=' / ']' + rm -rf /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch ++ dirname /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch + mkdir -p /builddir/build/BUILDROOT + mkdir /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration 
-Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 ++ xargs basename --multiple ++ ls /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl ++ sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/' + specifier=textdistance==4.6.1 + '[' -z textdistance==4.6.1 ']' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + /usr/bin/python3 -m pip install --root /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --prefix /usr --no-deps --disable-pip-version-check --progress-bar off --verbose --ignore-installed --no-warn-script-location --no-index --no-cache-dir --find-links /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir textdistance==4.6.1 Using pip 23.3.1 from /usr/lib/python3.12/site-packages/pip (python 3.12) Looking in links: /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir Processing ./pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl Installing collected packages: textdistance Successfully installed textdistance-4.6.1 + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin ']' + rm -f /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo + site_dirs=() + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + site_dirs+=("/usr/lib/python3.12/site-packages") + '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages '!=' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages ']' + for site_dir in ${site_dirs[@]} + for 
distinfo in /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch$site_dir/*.dist-info + echo '%ghost /usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info' + sed -i s/pip/rpm/ /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/INSTALLER + PYTHONPATH=/usr/lib/rpm/redhat + /usr/bin/python3 -B /usr/lib/rpm/redhat/pyproject_preprocess_record.py --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --record /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record + rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD' + rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED' ++ wc -l /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo ++ cut -f1 '-d ' + lines=1 + '[' 1 -ne 1 ']' + RPM_PERCENTAGES_COUNT=2 + /usr/bin/python3 /usr/lib/rpm/redhat/pyproject_save_files.py --output-files /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-files --output-modules /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-modules --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --sitelib /usr/lib/python3.12/site-packages --sitearch /usr/lib64/python3.12/site-packages --python-version 3.12 --pyproject-record /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record --prefix /usr -l textdistance + /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 4.6.1-1.fc40 --unique-debug-suffix -4.6.1-1.fc40.noarch --unique-debug-src-base python-textdistance-4.6.1-1.fc40.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/textdistance-4.6.1 find-debuginfo: starting Extracting debug info from 0 files Creating .debug symlinks for symlinks to ELF files find: ‘debug’: No such file or directory find-debuginfo: done + /usr/lib/rpm/check-buildroot + /usr/lib/rpm/redhat/brp-ldconfig + /usr/lib/rpm/brp-compress + /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip + /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip + /usr/lib/rpm/check-rpaths + /usr/lib/rpm/redhat/brp-mangle-shebangs + /usr/lib/rpm/brp-remove-la-files + env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8 Bytecompiling .py files below /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12 using python3.12 + /usr/lib/rpm/redhat/brp-python-hardlink Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.z7jYYa + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 
-fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 + k='not test_compare[Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein]' + k='not test_compare[Hamming] and not 
test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not 
test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ PATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin
+ PYTHONPATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages:/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages
+ PYTHONDONTWRITEBYTECODE=1
+ PYTEST_ADDOPTS=' --ignore=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir'
+ PYTEST_XDIST_AUTO_NUM_WORKERS=8
+ /usr/bin/pytest -v -k 'not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]' -n auto
============================= test session starts ==============================
platform linux -- Python 3.12.0, pytest-7.3.2, pluggy-1.3.0 -- /usr/bin/python3
cachedir: .pytest_cache
hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/builddir/build/BUILD/textdistance-4.6.1/.hypothesis/examples')
rootdir: /builddir/build/BUILD/textdistance-4.6.1
configfile: setup.cfg
plugins: hypothesis-6.82.0, xdist-3.5.0
created: 8/8 workers
8 workers [412 items]
scheduling tests via LoadScheduling
tests/test_common.py::test_normalization_range[alg0]
tests/test_common.py::test_normalization_range[alg12]
tests/test_common.py::test_normalization_by_one[alg0]
tests/test_common.py::test_normalization_by_one[alg12]
tests/test_common.py::test_normalization_same[alg12]
tests/test_common.py::test_normalization_monotonic[alg12]
tests/test_common.py::test_normalization_same[alg0]
tests/test_common.py::test_normalization_monotonic[alg0]
[gw4] [ 0%] FAILED tests/test_common.py::test_normalization_same[alg0]
tests/test_common.py::test_normalization_same[alg1]
[gw1] [ 0%] PASSED tests/test_common.py::test_normalization_range[alg12]
tests/test_common.py::test_normalization_range[alg13]
[gw5] [ 0%] PASSED tests/test_common.py::test_normalization_same[alg12]
[gw6] [ 0%] PASSED tests/test_common.py::test_normalization_monotonic[alg0]
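
The %check section above accumulates one long pytest -k expression, one "not <test id>" clause at a time, to deselect parametrizations that depend on external Hamming/Levenshtein/DamerauLevenshtein backends. A hypothetical Python equivalent of that filter construction (test ids abbreviated; pytest and pytest-xdist assumed available, as in this chroot):

    # Illustration of the deselection filter built in the spec's %check:
    # each unwanted parametrization becomes a "not <id>" clause, joined
    # with "and", then passed to pytest's -k option.
    import pytest

    deselected = [
        "test_compare[Hamming]",
        "test_compare[Levenshtein]",
        # ... one entry per parametrization listed in the log above
    ]
    expr = " and ".join(f"not {t}" for t in deselected)
    raise SystemExit(pytest.main(["-v", "-k", expr, "-n", "auto"]))
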
tests/test_common.py::test_normalization_monotonic[alg1] tests/test_common.py::test_normalization_same[alg13] [gw7] [ 1%] PASSED tests/test_common.py::test_normalization_monotonic[alg12] tests/test_common.py::test_normalization_monotonic[alg13] [gw2] [ 1%] FAILED tests/test_common.py::test_normalization_by_one[alg0] tests/test_common.py::test_normalization_by_one[alg1] [gw0] [ 1%] FAILED tests/test_common.py::test_normalization_range[alg0] tests/test_common.py::test_normalization_range[alg1] [gw1] [ 1%] PASSED tests/test_common.py::test_normalization_range[alg13] tests/test_common.py::test_normalization_range[alg14] [gw3] [ 2%] FAILED tests/test_common.py::test_normalization_by_one[alg12] [gw5] [ 2%] PASSED tests/test_common.py::test_normalization_same[alg13] tests/test_common.py::test_normalization_by_one[alg13] tests/test_common.py::test_normalization_same[alg14] [gw2] [ 2%] FAILED tests/test_common.py::test_normalization_by_one[alg1] tests/test_common.py::test_normalization_by_one[alg2] [gw4] [ 2%] FAILED tests/test_common.py::test_normalization_same[alg1] tests/test_common.py::test_normalization_same[alg2] [gw7] [ 3%] PASSED tests/test_common.py::test_normalization_monotonic[alg13] tests/test_common.py::test_normalization_monotonic[alg14] [gw5] [ 3%] PASSED tests/test_common.py::test_normalization_same[alg14] [gw6] [ 3%] PASSED tests/test_common.py::test_normalization_monotonic[alg1] tests/test_common.py::test_normalization_same[alg15] tests/test_common.py::test_normalization_monotonic[alg2] [gw3] [ 3%] PASSED tests/test_common.py::test_normalization_by_one[alg13] tests/test_common.py::test_normalization_by_one[alg14] [gw2] [ 4%] FAILED tests/test_common.py::test_normalization_by_one[alg2] tests/test_common.py::test_normalization_by_one[alg3] [gw0] [ 4%] FAILED tests/test_common.py::test_normalization_range[alg1] tests/test_common.py::test_normalization_range[alg2] [gw4] [ 4%] PASSED tests/test_common.py::test_normalization_same[alg2] tests/test_common.py::test_normalization_same[alg3] [gw5] [ 4%] PASSED tests/test_common.py::test_normalization_same[alg15] [gw7] [ 5%] PASSED tests/test_common.py::test_normalization_monotonic[alg14] tests/test_common.py::test_normalization_same[alg16] tests/test_common.py::test_normalization_monotonic[alg15] [gw1] [ 5%] FAILED tests/test_common.py::test_normalization_range[alg14] tests/test_common.py::test_normalization_range[alg15] [gw6] [ 5%] PASSED tests/test_common.py::test_normalization_monotonic[alg2] tests/test_common.py::test_normalization_monotonic[alg3] [gw0] [ 5%] PASSED tests/test_common.py::test_normalization_range[alg2] [gw2] [ 6%] PASSED tests/test_common.py::test_normalization_by_one[alg3] tests/test_common.py::test_normalization_by_one[alg4] tests/test_common.py::test_normalization_range[alg3] [gw4] [ 6%] PASSED tests/test_common.py::test_normalization_same[alg3] tests/test_common.py::test_normalization_same[alg4] [gw3] [ 6%] FAILED tests/test_common.py::test_normalization_by_one[alg14] tests/test_common.py::test_normalization_by_one[alg15] [gw5] [ 6%] PASSED tests/test_common.py::test_normalization_same[alg16] tests/test_common.py::test_normalization_same[alg17] [gw4] [ 7%] FAILED tests/test_common.py::test_normalization_same[alg4] tests/test_common.py::test_normalization_same[alg5] [gw7] [ 7%] PASSED tests/test_common.py::test_normalization_monotonic[alg15] tests/test_common.py::test_normalization_monotonic[alg16] [gw0] [ 7%] PASSED tests/test_common.py::test_normalization_range[alg3] 
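
The test_normalization_* cases being reported here verify algorithm-independent invariants of textdistance's normalized API: values stay within [0, 1], and normalized distance and similarity are complements of each other. In miniature, for one algorithm (a sketch, not the test code itself):

    # What the normalization property tests assert, shown for Levenshtein;
    # every textdistance algorithm instance exposes the same methods.
    import textdistance

    nd = textdistance.levenshtein.normalized_distance("test", "text")
    ns = textdistance.levenshtein.normalized_similarity("test", "text")
    assert 0.0 <= nd <= 1.0 and 0.0 <= ns <= 1.0
    assert abs(nd + ns - 1.0) < 1e-9  # distance and similarity sum to one
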
tests/test_common.py::test_normalization_range[alg4] [gw6] [ 7%] PASSED tests/test_common.py::test_normalization_monotonic[alg3] tests/test_common.py::test_normalization_monotonic[alg4] [gw1] [ 8%] FAILED tests/test_common.py::test_normalization_range[alg15] tests/test_common.py::test_normalization_range[alg16] [gw4] [ 8%] PASSED tests/test_common.py::test_normalization_same[alg5] tests/test_common.py::test_normalization_same[alg6] [gw3] [ 8%] PASSED tests/test_common.py::test_normalization_by_one[alg15] tests/test_common.py::test_normalization_by_one[alg16] [gw2] [ 8%] FAILED tests/test_common.py::test_normalization_by_one[alg4] tests/test_common.py::test_normalization_by_one[alg5] [gw0] [ 8%] PASSED tests/test_common.py::test_normalization_range[alg4] tests/test_common.py::test_normalization_range[alg5] [gw6] [ 9%] PASSED tests/test_common.py::test_normalization_monotonic[alg4] tests/test_common.py::test_normalization_monotonic[alg5] [gw4] [ 9%] PASSED tests/test_common.py::test_normalization_same[alg6] tests/test_common.py::test_normalization_same[alg7] [gw5] [ 9%] FAILED tests/test_common.py::test_normalization_same[alg17] tests/test_common.py::test_normalization_same[alg18] [gw7] [ 9%] PASSED tests/test_common.py::test_normalization_monotonic[alg16] tests/test_common.py::test_normalization_monotonic[alg17] [gw3] [ 10%] FAILED tests/test_common.py::test_normalization_by_one[alg16] tests/test_common.py::test_normalization_by_one[alg17] [gw2] [ 10%] FAILED tests/test_common.py::test_normalization_by_one[alg5] [gw4] [ 10%] FAILED tests/test_common.py::test_normalization_same[alg7] tests/test_common.py::test_normalization_by_one[alg6] tests/test_common.py::test_normalization_same[alg8] [gw0] [ 10%] PASSED tests/test_common.py::test_normalization_range[alg5] tests/test_common.py::test_normalization_range[alg6] [gw6] [ 11%] PASSED tests/test_common.py::test_normalization_monotonic[alg5] tests/test_common.py::test_normalization_monotonic[alg6] [gw7] [ 11%] PASSED tests/test_common.py::test_normalization_monotonic[alg17] tests/test_common.py::test_normalization_monotonic[alg18] [gw5] [ 11%] FAILED tests/test_common.py::test_normalization_same[alg18] tests/test_common.py::test_normalization_same[alg19] [gw2] [ 11%] FAILED tests/test_common.py::test_normalization_by_one[alg6] tests/test_common.py::test_normalization_by_one[alg7] [gw3] [ 12%] FAILED tests/test_common.py::test_normalization_by_one[alg17] [gw0] [ 12%] PASSED tests/test_common.py::test_normalization_range[alg6] tests/test_common.py::test_normalization_range[alg7] tests/test_common.py::test_normalization_by_one[alg18] [gw4] [ 12%] FAILED tests/test_common.py::test_normalization_same[alg8] tests/test_common.py::test_normalization_same[alg9] [gw5] [ 12%] PASSED tests/test_common.py::test_normalization_same[alg19] tests/test_common.py::test_normalization_same[alg20] [gw7] [ 13%] PASSED tests/test_common.py::test_normalization_monotonic[alg18] tests/test_common.py::test_normalization_monotonic[alg19] [gw0] [ 13%] FAILED tests/test_common.py::test_normalization_range[alg7] tests/test_common.py::test_normalization_range[alg8] [gw6] [ 13%] PASSED tests/test_common.py::test_normalization_monotonic[alg6] tests/test_common.py::test_normalization_monotonic[alg7] [gw0] [ 13%] FAILED tests/test_common.py::test_normalization_range[alg8] tests/test_common.py::test_normalization_range[alg9] [gw4] [ 14%] PASSED tests/test_common.py::test_normalization_same[alg9] tests/test_common.py::test_normalization_same[alg10] [gw3] [ 14%] PASSED 
tests/test_common.py::test_normalization_by_one[alg18] [gw5] [ 14%] PASSED tests/test_common.py::test_normalization_same[alg20] tests/test_common.py::test_normalization_same[alg21] tests/test_common.py::test_normalization_by_one[alg19] [gw7] [ 14%] PASSED tests/test_common.py::test_normalization_monotonic[alg19] tests/test_common.py::test_normalization_monotonic[alg20] [gw2] [ 15%] FAILED tests/test_common.py::test_normalization_by_one[alg7] tests/test_common.py::test_normalization_by_one[alg8] [gw6] [ 15%] PASSED tests/test_common.py::test_normalization_monotonic[alg7] tests/test_common.py::test_normalization_monotonic[alg8] [gw3] [ 15%] PASSED tests/test_common.py::test_normalization_by_one[alg19] tests/test_common.py::test_normalization_by_one[alg20] [gw4] [ 15%] FAILED tests/test_common.py::test_normalization_same[alg10] tests/test_common.py::test_normalization_same[alg11] [gw0] [ 16%] FAILED tests/test_common.py::test_normalization_range[alg9] tests/test_common.py::test_normalization_range[alg10] [gw7] [ 16%] PASSED tests/test_common.py::test_normalization_monotonic[alg20] tests/test_common.py::test_normalization_monotonic[alg21] [gw5] [ 16%] FAILED tests/test_common.py::test_normalization_same[alg21] tests/test_common.py::test_normalization_same[alg22] [gw2] [ 16%] PASSED tests/test_common.py::test_normalization_by_one[alg8] tests/test_common.py::test_normalization_by_one[alg9] [gw3] [ 16%] PASSED tests/test_common.py::test_normalization_by_one[alg20] tests/test_common.py::test_normalization_by_one[alg21] [gw6] [ 17%] PASSED tests/test_common.py::test_normalization_monotonic[alg8] tests/test_common.py::test_normalization_monotonic[alg9] [gw0] [ 17%] PASSED tests/test_common.py::test_normalization_range[alg10] tests/test_common.py::test_normalization_range[alg11] [gw4] [ 17%] FAILED tests/test_common.py::test_normalization_same[alg11] tests/test_common.py::test_no_common_chars[alg0] [gw4] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg0] tests/test_common.py::test_no_common_chars[alg1] [gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg1] tests/test_common.py::test_no_common_chars[alg2] [gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg2] tests/test_common.py::test_no_common_chars[alg3] [gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg3] tests/test_common.py::test_no_common_chars[alg4] [gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg4] tests/test_common.py::test_no_common_chars[alg5] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg5] [gw5] [ 19%] PASSED tests/test_common.py::test_normalization_same[alg22] tests/test_common.py::test_no_common_chars[alg6] tests/test_common.py::test_normalization_same[alg23] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg6] tests/test_common.py::test_no_common_chars[alg7] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg7] tests/test_common.py::test_no_common_chars[alg8] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg8] tests/test_common.py::test_no_common_chars[alg9] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg9] tests/test_common.py::test_no_common_chars[alg10] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg10] tests/test_common.py::test_no_common_chars[alg11] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg11] tests/test_common.py::test_no_common_chars[alg12] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg12] 
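
test_empty, test_no_common_chars and test_unequal_distance check further generic properties across all registered algorithms: empty inputs cost nothing, disjoint strings are maximally distant, and extra unmatched length adds distance. Two concrete instances consistent with the parametrized expectations in this log (e.g. test_distance[test-qwe-4]):

    # Concrete instances of the property tests running above.
    import textdistance

    assert textdistance.hamming("", "") == 0             # empty inputs
    assert textdistance.hamming("qwe", "asd") == 3       # no common characters
    assert textdistance.levenshtein("test", "qwe") == 4  # unequal strings
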
tests/test_common.py::test_no_common_chars[alg13] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg13] tests/test_common.py::test_no_common_chars[alg14] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg14] tests/test_common.py::test_no_common_chars[alg15] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg15] tests/test_common.py::test_no_common_chars[alg16] [gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg16] [gw7] [ 22%] PASSED tests/test_common.py::test_normalization_monotonic[alg21] tests/test_common.py::test_no_common_chars[alg17] [gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg17] tests/test_common.py::test_unequal_distance[alg3] tests/test_common.py::test_normalization_monotonic[alg22] [gw4] [ 22%] PASSED tests/test_common.py::test_unequal_distance[alg3] tests/test_common.py::test_unequal_distance[alg4] [gw4] [ 23%] PASSED tests/test_common.py::test_unequal_distance[alg4] tests/test_common.py::test_unequal_distance[alg5] [gw4] [ 23%] PASSED tests/test_common.py::test_unequal_distance[alg5] tests/test_common.py::test_unequal_distance[alg6] [gw2] [ 23%] PASSED tests/test_common.py::test_normalization_by_one[alg9] [gw4] [ 23%] PASSED tests/test_common.py::test_unequal_distance[alg6] tests/test_common.py::test_unequal_distance[alg7] tests/test_common.py::test_normalization_by_one[alg10] [gw4] [ 24%] PASSED tests/test_common.py::test_unequal_distance[alg7] tests/test_common.py::test_unequal_distance[alg8] [gw4] [ 24%] PASSED tests/test_common.py::test_unequal_distance[alg8] tests/test_common.py::test_unequal_distance[alg9] [gw4] [ 24%] PASSED tests/test_common.py::test_unequal_distance[alg9] tests/test_common.py::test_unequal_distance[alg10] [gw4] [ 24%] PASSED tests/test_common.py::test_unequal_distance[alg10] [gw3] [ 25%] FAILED tests/test_common.py::test_normalization_by_one[alg21] tests/test_common.py::test_normalization_by_one[alg22] tests/test_common.py::test_unequal_distance[alg11] [gw4] [ 25%] PASSED tests/test_common.py::test_unequal_distance[alg11] tests/test_common.py::test_unequal_distance[alg12] [gw4] [ 25%] PASSED tests/test_common.py::test_unequal_distance[alg12] tests/test_common.py::test_unequal_distance[alg13] [gw4] [ 25%] PASSED tests/test_common.py::test_unequal_distance[alg13] tests/test_common.py::test_unequal_distance[alg14] [gw4] [ 25%] PASSED tests/test_common.py::test_unequal_distance[alg14] tests/test_common.py::test_unequal_distance[alg15] [gw4] [ 26%] PASSED tests/test_common.py::test_unequal_distance[alg15] tests/test_common.py::test_unequal_distance[alg16] [gw4] [ 26%] PASSED tests/test_common.py::test_unequal_distance[alg16] tests/test_common.py::test_unequal_distance[alg17] [gw4] [ 26%] PASSED tests/test_common.py::test_unequal_distance[alg17] tests/test_common.py::test_unequal_distance[alg18] [gw4] [ 26%] PASSED tests/test_common.py::test_unequal_distance[alg18] tests/test_common.py::test_unequal_distance[alg19] [gw4] [ 27%] PASSED tests/test_common.py::test_unequal_distance[alg19] tests/test_common.py::test_unequal_distance[alg20] [gw4] [ 27%] PASSED tests/test_common.py::test_unequal_distance[alg20] tests/test_common.py::test_unequal_distance[alg21] [gw4] [ 27%] PASSED tests/test_common.py::test_unequal_distance[alg21] tests/test_common.py::test_unequal_distance[alg22] [gw4] [ 27%] PASSED tests/test_common.py::test_unequal_distance[alg22] tests/test_common.py::test_unequal_distance[alg23] [gw4] [ 28%] PASSED tests/test_common.py::test_unequal_distance[alg23] 
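
tests/test_external.py, whose test_compare cases start next, cross-checks textdistance's pure-Python results against the external backends pulled in through the extras. Roughly the idea, sketched here with rapidfuzz as the assumed reference implementation:

    # Sketch of the cross-check behind test_external.py::test_compare.
    import textdistance
    from rapidfuzz.distance import Jaro

    s1, s2 = "MARTHA", "MARHTA"
    # Both should report the well-known Jaro similarity ~0.944444
    assert abs(textdistance.jaro(s1, s2) - Jaro.similarity(s1, s2)) < 1e-9
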
tests/test_external.py::test_compare[Jaro] [gw6] [ 28%] PASSED tests/test_common.py::test_normalization_monotonic[alg9] tests/test_common.py::test_normalization_monotonic[alg10] [gw0] [ 28%] FAILED tests/test_common.py::test_normalization_range[alg11] tests/test_common.py::test_no_common_chars[alg18] [gw0] [ 28%] PASSED tests/test_common.py::test_no_common_chars[alg18] [gw5] [ 29%] PASSED tests/test_common.py::test_normalization_same[alg23] tests/test_common.py::test_no_common_chars[alg19] [gw0] [ 29%] PASSED tests/test_common.py::test_no_common_chars[alg19] tests/test_common.py::test_empty[alg11] [gw5] [ 29%] PASSED tests/test_common.py::test_empty[alg11] tests/test_common.py::test_no_common_chars[alg20] tests/test_common.py::test_empty[alg12] [gw5] [ 29%] PASSED tests/test_common.py::test_empty[alg12] tests/test_common.py::test_empty[alg13] [gw5] [ 30%] PASSED tests/test_common.py::test_empty[alg13] tests/test_common.py::test_empty[alg14] [gw0] [ 30%] PASSED tests/test_common.py::test_no_common_chars[alg20] [gw5] [ 30%] PASSED tests/test_common.py::test_empty[alg14] tests/test_common.py::test_no_common_chars[alg21] [gw0] [ 30%] PASSED tests/test_common.py::test_no_common_chars[alg21] tests/test_common.py::test_empty[alg15] tests/test_common.py::test_no_common_chars[alg22] [gw5] [ 31%] PASSED tests/test_common.py::test_empty[alg15] [gw0] [ 31%] PASSED tests/test_common.py::test_no_common_chars[alg22] tests/test_common.py::test_no_common_chars[alg23] [gw0] [ 31%] PASSED tests/test_common.py::test_no_common_chars[alg23] tests/test_common.py::test_empty[alg0] [gw0] [ 31%] PASSED tests/test_common.py::test_empty[alg0] tests/test_common.py::test_empty[alg16] [gw5] [ 32%] PASSED tests/test_common.py::test_empty[alg16] tests/test_common.py::test_empty[alg17] tests/test_common.py::test_empty[alg1] [gw0] [ 32%] PASSED tests/test_common.py::test_empty[alg1] [gw3] [ 32%] FAILED tests/test_common.py::test_normalization_by_one[alg22] [gw5] [ 32%] PASSED tests/test_common.py::test_empty[alg17] tests/test_common.py::test_empty[alg2] [gw0] [ 33%] PASSED tests/test_common.py::test_empty[alg2] tests/test_common.py::test_empty[alg18] tests/test_common.py::test_empty[alg3] [gw5] [ 33%] PASSED tests/test_common.py::test_empty[alg18] tests/test_common.py::test_empty[alg19] [gw0] [ 33%] PASSED tests/test_common.py::test_empty[alg3] tests/test_common.py::test_empty[alg4] tests/test_common.py::test_normalization_by_one[alg23] [gw0] [ 33%] PASSED tests/test_common.py::test_empty[alg4] [gw5] [ 33%] PASSED tests/test_common.py::test_empty[alg19] tests/test_common.py::test_empty[alg20] [gw5] [ 34%] PASSED tests/test_common.py::test_empty[alg20] tests/test_common.py::test_empty[alg5] [gw0] [ 34%] PASSED tests/test_common.py::test_empty[alg5] tests/test_common.py::test_empty[alg6] [gw0] [ 34%] PASSED tests/test_common.py::test_empty[alg6] tests/test_common.py::test_empty[alg21] tests/test_common.py::test_empty[alg7] [gw0] [ 34%] PASSED tests/test_common.py::test_empty[alg7] tests/test_common.py::test_empty[alg8] [gw5] [ 35%] PASSED tests/test_common.py::test_empty[alg21] tests/test_common.py::test_empty[alg22] [gw0] [ 35%] PASSED tests/test_common.py::test_empty[alg8] [gw5] [ 35%] PASSED tests/test_common.py::test_empty[alg22] tests/test_common.py::test_empty[alg9] tests/test_common.py::test_empty[alg23] [gw5] [ 35%] PASSED tests/test_common.py::test_empty[alg23] tests/test_common.py::test_unequal_distance[alg0] [gw5] [ 36%] PASSED tests/test_common.py::test_unequal_distance[alg0] [gw2] [ 36%] PASSED 
tests/test_common.py::test_normalization_by_one[alg10] [gw0] [ 36%] PASSED tests/test_common.py::test_empty[alg9] tests/test_common.py::test_empty[alg10] [gw0] [ 36%] PASSED tests/test_common.py::test_empty[alg10] tests/test_common.py::test_unequal_distance[alg1] [gw5] [ 37%] PASSED tests/test_common.py::test_unequal_distance[alg1] tests/test_common.py::test_unequal_distance[alg2] [gw5] [ 37%] PASSED tests/test_common.py::test_unequal_distance[alg2] tests/test_compression/test_common.py::test_monotonicity[alg1] tests/test_compression/test_common.py::test_simmetry[alg3] tests/test_common.py::test_normalization_by_one[alg11] [gw0] [ 37%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg1] tests/test_compression/test_common.py::test_monotonicity[alg2] [gw0] [ 37%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg2] tests/test_compression/test_common.py::test_monotonicity[alg3] [gw0] [ 38%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg3] tests/test_compression/test_common.py::test_monotonicity[alg4] [gw0] [ 38%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg4] tests/test_compression/test_common.py::test_monotonicity[alg5] [gw0] [ 38%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg5] tests/test_compression/test_common.py::test_monotonicity[alg6] [gw0] [ 38%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg6] tests/test_compression/test_common.py::test_simmetry[alg0] [gw7] [ 39%] PASSED tests/test_common.py::test_normalization_monotonic[alg22] tests/test_common.py::test_normalization_monotonic[alg23] [gw4] [ 39%] PASSED tests/test_external.py::test_compare[Jaro] tests/test_external.py::test_compare[JaroWinkler] [gw6] [ 39%] PASSED tests/test_common.py::test_normalization_monotonic[alg10] tests/test_common.py::test_normalization_monotonic[alg11] [gw1] [ 39%] FAILED tests/test_common.py::test_normalization_range[alg16] tests/test_common.py::test_normalization_range[alg17] [gw5] [ 40%] FAILED tests/test_compression/test_common.py::test_simmetry[alg3] tests/test_compression/test_common.py::test_simmetry[alg4] [gw3] [ 40%] FAILED tests/test_common.py::test_normalization_by_one[alg23] tests/test_external.py::test_qval[3-Jaro] [gw4] [ 40%] PASSED tests/test_external.py::test_compare[JaroWinkler] tests/test_external.py::test_qval[None-Jaro] [gw2] [ 40%] FAILED tests/test_common.py::test_normalization_by_one[alg11] tests/test_compression/test_common.py::test_is_normalized[alg4] [gw6] [ 41%] PASSED tests/test_common.py::test_normalization_monotonic[alg11] tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor [gw7] [ 41%] PASSED tests/test_common.py::test_normalization_monotonic[alg23] tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0] [gw7] [ 41%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0] tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6] [gw7] [ 41%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6] tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor [gw1] [ 41%] FAILED tests/test_common.py::test_normalization_range[alg17] tests/test_common.py::test_normalization_range[alg18] [gw5] [ 42%] PASSED tests/test_compression/test_common.py::test_simmetry[alg4] tests/test_compression/test_common.py::test_simmetry[alg5] [gw6] [ 42%] PASSED tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor 
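
The tests/test_compression suite seen here covers the normalized compression distance (NCD) family, NCD_C(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)) for a compressor or entropy model C, checking properties such as symmetry and monotonicity. The variants are ordinary textdistance instances; exact values depend on the compressor model, so none are asserted in this sketch:

    # The compression-based algorithms are called like any other instance.
    import textdistance

    print(textdistance.sqrt_ncd("test", "nani"))
    print(textdistance.entropy_ncd("test", "nani"))
    print(textdistance.bz2_ncd("test", "nani"))
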
tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor [gw4] [ 42%] PASSED tests/test_external.py::test_qval[None-Jaro] tests/test_external.py::test_qval[None-JaroWinkler] [gw1] [ 42%] FAILED tests/test_common.py::test_normalization_range[alg18] tests/test_common.py::test_normalization_range[alg19] [gw2] [ 43%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg4] tests/test_compression/test_common.py::test_is_normalized[alg5] [gw3] [ 43%] FAILED tests/test_external.py::test_qval[3-Jaro] tests/test_external.py::test_qval[3-JaroWinkler] [gw3] [ 43%] FAILED tests/test_external.py::test_qval[3-JaroWinkler] tests/test_external.py::test_list_of_numbers[Jaro] [gw4] [ 43%] FAILED tests/test_external.py::test_qval[None-JaroWinkler] tests/test_external.py::test_qval[1-Jaro] [gw3] [ 44%] FAILED tests/test_external.py::test_list_of_numbers[Jaro] tests/test_external.py::test_list_of_numbers[JaroWinkler] [gw6] [ 44%] FAILED tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor tests/test_compression/test_sqrt_ncd.py::test_normalization_range [gw7] [ 44%] FAILED tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor [gw1] [ 44%] PASSED tests/test_common.py::test_normalization_range[alg19] tests/test_common.py::test_normalization_range[alg20] [gw2] [ 45%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg5] tests/test_compression/test_common.py::test_is_normalized[alg6] [gw0] [ 45%] FAILED tests/test_compression/test_common.py::test_simmetry[alg0] tests/test_compression/test_common.py::test_simmetry[alg1] [gw5] [ 45%] FAILED tests/test_compression/test_common.py::test_simmetry[alg5] tests/test_compression/test_common.py::test_simmetry[alg6] [gw4] [ 45%] PASSED tests/test_external.py::test_qval[1-Jaro] tests/test_external.py::test_qval[1-JaroWinkler] [gw6] [ 46%] PASSED tests/test_compression/test_sqrt_ncd.py::test_normalization_range tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3] 
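
test_distance_restricted and test_distance_unrestricted cover the two Damerau-Levenshtein variants: the restricted form (optimal string alignment) never edits a substring again after a transposition, so it can exceed the unrestricted distance. A sketch, assuming the restricted keyword introduced with textdistance 4.6 (external=False forces the pure-Python code path):

    # Restricted (OSA) vs unrestricted Damerau-Levenshtein; "ca" -> "abc"
    # is the classic pair on which the two variants disagree.
    from textdistance import DamerauLevenshtein

    osa  = DamerauLevenshtein(restricted=True,  external=False)
    full = DamerauLevenshtein(restricted=False, external=False)
    print(osa("ca", "abc"), full("ca", "abc"))  # expect 3 and 2
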
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7] [gw0] [ 48%] FAILED tests/test_compression/test_common.py::test_simmetry[alg1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2] [gw7] [ 48%] PASSED tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor tests/test_compression/test_common.py::test_simmetry[alg2] tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3] [gw6] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1] [gw6] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2] [gw6] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2] [gw6] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3] [gw6] [ 52%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3] tests/test_edit/test_editex.py::test_distance[--0] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[--0] tests/test_edit/test_editex.py::test_distance[nelson--12] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[nelson--12] tests/test_edit/test_editex.py::test_distance[-neilsen-14] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[-neilsen-14] tests/test_edit/test_editex.py::test_distance[ab-a-2] [gw6] [ 
53%] PASSED tests/test_edit/test_editex.py::test_distance[ab-a-2] tests/test_edit/test_editex.py::test_distance[ab-c-4] [gw6] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance[ab-c-4] tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1] [gw6] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1] tests/test_edit/test_editex.py::test_distance[-MARTHA-12] [gw6] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance[-MARTHA-12] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12] [gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24] [gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3] [gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4] [gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4] [gw2] [ 55%] PASSED tests/test_compression/test_common.py::test_is_normalized[alg6] tests/test_compression/test_common.py::test_normalized_by_one[alg0] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5] [gw6] [ 55%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5] tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0] [gw6] [ 55%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0] tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0] [gw3] [ 55%] FAILED tests/test_external.py::test_list_of_numbers[JaroWinkler] tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] [gw6] [ 56%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0] [gw3] [ 56%] PASSED tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5] [gw3] [ 56%] PASSED tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] tests/test_compression/test_arith_ncd.py::test_make_probs [gw3] [ 56%] PASSED tests/test_compression/test_arith_ncd.py::test_make_probs tests/test_compression/test_arith_ncd.py::test_arith_output [gw3] [ 57%] PASSED tests/test_compression/test_arith_ncd.py::test_arith_output tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] [gw3] [ 57%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] [gw3] [ 57%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] [gw3] [ 57%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] [gw3] [ 58%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] [gw6] [ 58%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5] tests/test_compression/test_common.py::test_monotonicity[alg0] 
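
Editex, exercised just above, mixes edit distance with phonetic letter groups: single edits cost 0, 1 or 2 depending on whether the letters involved share a group, which is why deleting all of 'nelson' costs 12 in the parametrized case above. Reproducing two of those cases directly:

    # Two editex cases taken verbatim from the parametrized tests in this log.
    import textdistance

    assert textdistance.editex("ALIE", "ALI") == 1   # test_distance[ALIE-ALI-1]
    assert textdistance.editex("", "MARTHA") == 12   # test_distance[-MARTHA-12]
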
tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1] [gw6] [ 58%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1] tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] [gw3] [ 58%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg0] tests/test_edit/test_hamming.py::test_distance[test-text-1] [gw6] [ 58%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] tests/test_edit/test_hamming.py::test_distance[test-tet-2] [gw3] [ 59%] PASSED tests/test_edit/test_hamming.py::test_distance[test-text-1] [gw6] [ 59%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tet-2] tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334] [gw6] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334] tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0] tests/test_edit/test_hamming.py::test_distance[test-tset-2] [gw3] [ 59%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tset-2] [gw6] [ 60%] PASSED tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0] tests/test_edit/test_hamming.py::test_distance[test-qwe-4] [gw1] [ 60%] PASSED tests/test_common.py::test_normalization_range[alg20] tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666] [gw6] [ 60%] PASSED tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666] tests/test_common.py::test_normalization_range[alg21] tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334] [gw3] [ 60%] PASSED tests/test_edit/test_hamming.py::test_distance[test-qwe-4] tests/test_edit/test_hamming.py::test_distance[test-testit-2] [gw3] [ 61%] PASSED tests/test_edit/test_hamming.py::test_distance[test-testit-2] [gw6] [ 61%] PASSED tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334] tests/test_edit/test_hamming.py::test_distance[test-tesst-2] tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444] [gw6] [ 61%] PASSED tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444] [gw3] [ 61%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tesst-2] tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925] tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222] [gw6] [ 62%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925] [gw3] [ 62%] PASSED tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222] tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111] tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666] [gw3] [ 62%] PASSED tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666] tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683] [gw3] [ 62%] PASSED tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683] tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665] [gw3] [ 63%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665] [gw6] [ 63%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111] tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84] tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0] [gw3] [ 63%] PASSED 
tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0] [gw6] [ 63%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84] tests/test_edit/test_levenshtein.py::test_distance[test-tset-2] tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332] [gw6] [ 64%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332] tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272] [gw3] [ 64%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tset-2] [gw6] [ 64%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272] tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4] [gw3] [ 64%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4] tests/test_edit/test_levenshtein.py::test_distance[test-text-1] tests/test_edit/test_levenshtein.py::test_distance[test-testit-2] [gw3] [ 65%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-testit-2] tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1] [gw3] [ 65%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1] [gw6] [ 65%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-text-1] tests/test_edit/test_levenshtein.py::test_distance[test-tet-1] tests/test_edit/test_matrix.py::test_distance[A-C--3] [gw6] [ 65%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tet-1] tests/test_edit/test_matrix.py::test_distance[--1] [gw6] [ 66%] PASSED tests/test_edit/test_matrix.py::test_distance[--1] [gw7] [ 66%] PASSED tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor tests/test_edit/test_matrix.py::test_distance[-a-0] [gw6] [ 66%] PASSED tests/test_edit/test_matrix.py::test_distance[-a-0] tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1] [gw3] [ 66%] PASSED tests/test_edit/test_matrix.py::test_distance[A-C--3] tests/test_edit/test_matrix.py::test_distance[G-G-7] [gw3] [ 66%] PASSED tests/test_edit/test_matrix.py::test_distance[G-G-7] tests/test_edit/test_matrix.py::test_distance[A-A-10] [gw6] [ 67%] PASSED tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1] [gw3] [ 67%] PASSED tests/test_edit/test_matrix.py::test_distance[A-A-10] tests/test_edit/test_matrix.py::test_distance[A-G--1] [gw6] [ 67%] PASSED tests/test_edit/test_matrix.py::test_distance[A-G--1] tests/test_edit/test_matrix.py::test_distance[C-T-0] [gw6] [ 67%] PASSED tests/test_edit/test_matrix.py::test_distance[C-T-0] tests/test_edit/test_mlipns.py::test_distance[--1] tests/test_edit/test_matrix.py::test_distance[T-A--4] [gw6] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[--1] [gw3] [ 68%] PASSED tests/test_edit/test_matrix.py::test_distance[T-A--4] tests/test_edit/test_mlipns.py::test_distance[a--0] [gw6] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[a--0] [gw4] [ 68%] PASSED tests/test_external.py::test_qval[1-JaroWinkler] tests/test_external.py::test_qval[2-Jaro] tests/test_edit/test_mlipns.py::test_distance[-a-0] tests/test_edit/test_matrix.py::test_distance[T-C-0] [gw6] [ 69%] PASSED tests/test_edit/test_mlipns.py::test_distance[-a-0] tests/test_edit/test_mlipns.py::test_distance[a-a-1] [gw3] [ 69%] PASSED tests/test_edit/test_matrix.py::test_distance[T-C-0] tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1] [gw0] [ 69%] FAILED 
tests/test_compression/test_common.py::test_simmetry[alg2] [gw3] [ 69%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1] tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3] [gw6] [ 70%] PASSED tests/test_edit/test_mlipns.py::test_distance[a-a-1] tests/test_edit/test_mlipns.py::test_distance[ab-a-1] [gw3] [ 70%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0] [gw0] [ 70%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3] tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1] [gw6] [ 70%] PASSED tests/test_edit/test_mlipns.py::test_distance[ab-a-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4] tests/test_edit/test_mlipns.py::test_distance[abc-abc-1] [gw0] [ 71%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4] [gw6] [ 71%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abc-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1] tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1] [gw6] [ 71%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1] [gw0] [ 71%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1] [gw3] [ 72%] PASSED tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1] tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16] tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1] [gw6] [ 72%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16] tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0] [gw0] [ 72%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1] [gw3] [ 72%] PASSED tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4] [gw0] [ 73%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4] tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] [gw6] [ 73%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0] tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5] [gw3] [ 73%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2] [gw3] [ 73%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26] [gw6] [ 74%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5] [gw0] [ 74%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2] tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0] tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1] [gw6] [ 74%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7] 
[gw3] [ 74%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0] tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1] [gw0] [ 75%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1] tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0] [gw6] [ 75%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1] tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0] [gw6] [ 75%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0] [gw3] [ 75%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0] tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873] [gw0] [ 75%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1] tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1] [gw6] [ 76%] PASSED tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111] [gw3] [ 76%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873] [gw0] [ 76%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1] tests/test_phonetic/test_editex.py::test_distance[--0] tests/test_phonetic/test_editex.py::test_distance[ab-a-2] tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333] [gw6] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[--0] [gw0] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-a-2] [gw3] [ 77%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333] tests/test_phonetic/test_editex.py::test_distance[nelson--12] [gw6] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson--12] tests/test_phonetic/test_editex.py::test_distance[ab-c-4] tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666] [gw3] [ 77%] PASSED tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666] tests/test_phonetic/test_editex.py::test_distance[-neilsen-14] tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2] [gw6] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_distance[-neilsen-14] tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2] [gw0] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-c-4] [gw3] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2] [gw6] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2] tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2] tests/test_phonetic/test_editex.py::test_distance[niall-neal-1] [gw3] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-neal-1] [gw0] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2] tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3] [gw0] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3] tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2] tests/test_phonetic/test_editex.py::test_distance[neal-niall-1] tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3] [gw6] [ 79%] 
PASSED tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2] [gw3] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-niall-1] tests/test_phonetic/test_editex.py::test_distance[cat-hat-2] tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12] [gw6] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_distance[cat-hat-2] [gw0] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3] tests/test_phonetic/test_editex.py::test_local[--0] [gw0] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[--0] [gw3] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12] tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2] tests/test_phonetic/test_editex.py::test_local[nelson--12] [gw6] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2] [gw0] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson--12] tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6] tests/test_phonetic/test_editex.py::test_local[-neilsen-14] [gw3] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6] [gw6] [ 82%] PASSED tests/test_phonetic/test_editex.py::test_local[-neilsen-14] tests/test_phonetic/test_editex.py::test_local[ab-c-2] [gw0] [ 82%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-c-2] tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2] tests/test_phonetic/test_editex.py::test_local[ab-a-2] tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2] [gw3] [ 82%] PASSED tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2] [gw0] [ 82%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2] tests/test_phonetic/test_editex.py::test_local[niall-neal-1] [gw6] [ 83%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-a-2] tests/test_phonetic/test_editex.py::test_local[niall-nihal-2] tests/test_phonetic/test_editex.py::test_local[neal-niall-1] [gw3] [ 83%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-neal-1] [gw0] [ 83%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-nihal-2] tests/test_phonetic/test_editex.py::test_local[nihal-niall-2] tests/test_phonetic/test_editex.py::test_local[neal-nihl-3] [gw3] [ 83%] PASSED tests/test_phonetic/test_editex.py::test_local[nihal-niall-2] [gw6] [ 83%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-niall-1] [gw0] [ 84%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-nihl-3] tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-] tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd] tests/test_phonetic/test_editex.py::test_local[nihl-neal-3] [gw0] [ 84%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd] [gw6] [ 84%] PASSED tests/test_phonetic/test_editex.py::test_local[nihl-neal-3] [gw3] [ 84%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-] tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest] tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet] tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION] [gw0] [ 85%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest] [gw3] [ 85%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet] tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia] [gw6] [ 85%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION] [gw0] [ 85%] 
PASSED tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia] tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-] [gw0] [ 86%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-] tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa] tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-] tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a] [gw0] [ 86%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a] tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab] [gw0] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab] [gw3] [ 86%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-] [gw6] [ 87%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa] [gw5] [ 87%] FAILED tests/test_compression/test_common.py::test_simmetry[alg6] tests/test_compression/test_common.py::test_is_normalized[alg0] tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc] [gw0] [ 87%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc] tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab] [gw3] [ 87%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab] tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet] tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc] tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0] [gw0] [ 88%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc] [gw3] [ 88%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0] tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-] [gw0] [ 88%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-] tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-] tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST] [gw3] [ 88%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-] 
tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST] [gw0] [ 89%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST] [gw3] [ 89%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST] tests/test_token/test_bag.py::test_distance[qwe-erty-3] tests/test_token/test_bag.py::test_distance[qwe-qwe-0] [gw3] [ 89%] PASSED tests/test_token/test_bag.py::test_distance[qwe-erty-3] [gw0] [ 89%] PASSED tests/test_token/test_bag.py::test_distance[qwe-qwe-0] tests/test_token/test_bag.py::test_distance[qwe-rtys-4] tests/test_token/test_bag.py::test_distance[qwe-ewq-0] [gw3] [ 90%] PASSED tests/test_token/test_bag.py::test_distance[qwe-rtys-4] tests/test_token/test_cosine.py::test_distance[test-text-0.75] [gw0] [ 90%] PASSED tests/test_token/test_bag.py::test_distance[qwe-ewq-0] [gw3] [ 90%] PASSED tests/test_token/test_cosine.py::test_distance[test-text-0.75] tests/test_token/test_jaccard.py::test_distance[test-text-0.6] tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595] [gw3] [ 90%] PASSED tests/test_token/test_jaccard.py::test_distance[test-text-0.6] [gw6] [ 91%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet] [gw0] [ 91%] PASSED tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595] tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333] [gw3] [ 91%] PASSED tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333] tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1] tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625] tests/test_token/test_jaccard.py::test_compare_with_tversky [gw6] [ 91%] PASSED 
tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1] [gw0] [ 91%] PASSED tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625] tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805] [gw0] [ 92%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805] tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666] [gw0] [ 92%] PASSED tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666] tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334] [gw0] [ 92%] PASSED tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334] tests/test_token/test_sorensen.py::test_distance[test-text-0.75] [gw1] [ 92%] FAILED tests/test_common.py::test_normalization_range[alg21] tests/test_common.py::test_normalization_range[alg22] [gw0] [ 93%] PASSED tests/test_token/test_sorensen.py::test_distance[test-text-0.75] tests/test_token/test_sorensen.py::test_compare_with_tversky [gw7] [ 93%] PASSED tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor tests/test_compression/test_entropy_ncd.py::test_normalization_range [gw2] [ 93%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg0] tests/test_compression/test_common.py::test_normalized_by_one[alg1] [gw4] [ 93%] FAILED tests/test_external.py::test_qval[2-Jaro] tests/test_external.py::test_qval[2-JaroWinkler] [gw3] [ 94%] FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667] [gw3] [ 94%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667] [gw6] [ 94%] PASSED tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set tests/test_token/test_overlap.py::test_distance[test-text-0.75] [gw6] [ 94%] PASSED tests/test_token/test_overlap.py::test_distance[test-text-0.75] [gw1] [ 95%] FAILED tests/test_common.py::test_normalization_range[alg22] [gw0] [ 95%] PASSED tests/test_token/test_sorensen.py::test_compare_with_tversky tests/test_common.py::test_normalization_range[alg23] [gw5] [ 95%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg0] tests/test_compression/test_common.py::test_is_normalized[alg1] tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set [gw7] [ 95%] FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503] [gw7] [ 96%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503] tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1] [gw7] [ 96%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1] tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor [gw2] [ 96%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg1] [gw4] [ 96%] PASSED tests/test_external.py::test_qval[2-JaroWinkler] tests/test_compression/test_common.py::test_normalized_by_one[alg2] [gw0] [ 97%] FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set [gw5] [ 97%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg1] tests/test_compression/test_common.py::test_is_normalized[alg2] [gw1] [ 97%] FAILED tests/test_common.py::test_normalization_range[alg23] [gw7] [ 97%] PASSED 
tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor [gw5] [ 98%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg2] tests/test_compression/test_common.py::test_is_normalized[alg3] [gw7] [ 98%] PASSED tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor [gw2] [ 98%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg2] tests/test_compression/test_common.py::test_normalized_by_one[alg3] [gw5] [ 98%] PASSED tests/test_compression/test_common.py::test_is_normalized[alg3] [gw2] [ 99%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg3] tests/test_compression/test_common.py::test_normalized_by_one[alg4] [gw2] [ 99%] PASSED tests/test_compression/test_common.py::test_normalized_by_one[alg4] tests/test_compression/test_common.py::test_normalized_by_one[alg5] [gw2] [ 99%] PASSED tests/test_compression/test_common.py::test_normalized_by_one[alg5] tests/test_compression/test_common.py::test_normalized_by_one[alg6] [gw2] [ 99%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6] tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1] [gw2] [100%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1] =================================== FAILURES =================================== ________________________ test_normalization_same[alg0] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 13 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 13 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 13 bytes, frozen) function = .run at 0xffffff76c17920> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 13 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = 'È\U00040910\x98', alg = Bag({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('È\U00040910\x98', Bag({'qval': 1, 'external': True})), kwargs = {} initial_draws = 1, start = 5422908.779286293, result = None finish = 5422909.063481185, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=284195) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 284.19ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Bag({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 13 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 284.19ms, which exceeds the deadline of 200.00ms'), "args = ('È\\U00040910\\x98', Bag({'q...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='È\U00040910\x98', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Bag({'qval': 1, 'external': True}), E text='È\U00040910\x98', E ) E Unreliable test timings! On an initial run, this test took 284.19ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.92 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg0] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 102 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 102 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 102 bytes, frozen) function = .run at 0xffffff982a2480> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 102 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\x0eï\U0008050eËÿ\x93¥\U000c9a96\x0f\U000786274\U00036841\xad«\\ýìû' right = '', alg = Bag({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x0eï\U0008050eËÿ\x93¥\U000c9a96\x0f\U000786274\U00036841\xad«\\ýìû', '', Bag({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 5422911.678247631, result = None finish = 5422912.024898692, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=346651) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 346.65ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Bag({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 102 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 346.65ms, which exceeds the deadline of 200.00ms'), "args = ('\\x0eï\\U0008050eËÿ\\x93¥\\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\x0eï\U0008050eËÿ\x93¥\U000c9a96\x0f\U000786274\U00036841\xad«\\ýìû', right='', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Bag({'qval': 1, 'external': True}), E left='\x0eï\U0008050eËÿ\x93¥\U000c9a96\x0f\U000786274\U00036841\xad«\\ýìû', E right='', E ) E Unreliable test timings! On an initial run, this test took 346.65ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.93 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg0] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 192 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 192 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 192 bytes, frozen) function = .run at 0xffffff867120c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 192 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'ÿ\x14/ãË\U000b192f\U00085cff³\x93C\x03\x1fÏ\x9aF埁 @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('ÿ\x14/ãË\U000b192f\U00085cff³\x93C\x03\x1fÏ\x9aF埁= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 945.44ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Bag({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 192 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 945.44ms, which exceeds the deadline of 200.00ms'), "args = ('ÿ\\x14/ãË\\U000b192f\\U0008...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='ÿ\x14/ãË\U000b192f\U00085cff³\x93C\x03\x1fÏ\x9aF埁 data = ConjectureData(INTERESTING, 35 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. 
""" try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 35 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 35 bytes, frozen) function = .run at 0xffffff9deda980> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 35 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U0004f58d\x02\x1f\U00080f84', right = 'Ó' alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0004f58d\x02\x1f\U00080f84', 'Ó', Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 5422926.119901333, result = None finish = 5422926.624210733, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=504309) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 504.31ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) 
> @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 35 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 504.31ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0004f58d\\x02\\x1f\\U000...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! 
On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U0004f58d\x02\x1f\U00080f84', right='Ó', alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}), E left='\U0004f58d\x02\x1f\U00080f84', E right='Ó', E ) E Unreliable test timings! On an initial run, this test took 504.31ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.30 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg1] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 75 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 75 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 75 bytes, frozen) function = .run at 0xffffff982089a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 75 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '(Ì\x1bäÆB+¡\x06Ü', right = '~\x0bë)𢝓' alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('(Ì\x1bäÆB+¡\x06Ü', '~\x0bë)𢝓', Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 5422937.760452191, result = None finish = 5422938.056568692, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=296117) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 296.12ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 75 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 296.12ms, which exceeds the deadline of 200.00ms'), "args = ('(Ì\\x1bäÆB+¡\\x06Ü', '~\\x0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='(Ì\x1bäÆB+¡\x06Ü', right='~\x0bë)𢝓', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}), E left='(Ì\x1bäÆB+¡\x06Ü', E right='~\x0bë)𢝓', E ) E Unreliable test timings! On an initial run, this test took 296.12ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.95 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg1] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 12 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 12 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 12 bytes, frozen) function = .run at 0xffffff769d3920> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 12 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = 'Nîî' alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('Nîî', Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) kwargs = {}, initial_draws = 1, start = 5422941.729928891, result = None finish = 5422942.012972473, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=283044) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 283.04ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 12 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 283.04ms, which exceeds the deadline of 200.00ms'), "args = ('Nîî', Hamming({'qval': 1, '...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='Nîî', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}), E text='Nîî', E ) E Unreliable test timings! On an initial run, this test took 283.04ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.31 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg2] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 12 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 12 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 12 bytes, frozen) function = .run at 0xffffff98101bc0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 12 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\x8f', right = 'C' alg = Levenshtein({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x8f', 'C', Levenshtein({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 5422951.767100827, result = None finish = 5422956.050088086, internal_draw_time = 0 runtime = datetime.timedelta(seconds=4, microseconds=282987) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 4282.99ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Levenshtein({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 12 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 4282.99ms, which exceeds the deadline of 200.00ms'), "args = ('\\x8f', 'C', Levenshtein({...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\x8f', right='C', alg=Levenshtein({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Levenshtein({'qval': 1, 'test_func': , 'external': True}), E left='\x8f', E right='C', E ) E Unreliable test timings! On an initial run, this test took 4282.99ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.48 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg1] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 40 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 40 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 40 bytes, frozen) function = .run at 0xffffff86676840> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 40 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '¸\x92c', right = '𦦫\U000d8a6c\U000b718f' alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('¸\x92c', '𦦫\U000d8a6c\U000b718f', Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 5422950.084712084, result = None finish = 5422954.659897108, internal_draw_time = 0 runtime = datetime.timedelta(seconds=4, microseconds=575185) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 4575.19ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 40 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 4575.19ms, which exceeds the deadline of 200.00ms'), "args = ('¸\\x92c', '𦦫\\U000d8a6c\\U...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='¸\x92c', right='𦦫\U000d8a6c\U000b718f', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}), E left='¸\x92c', E right='𦦫\U000d8a6c\U000b718f', E ) E Unreliable test timings! On an initial run, this test took 4575.19ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.67 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg14] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 15 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 15 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 15 bytes, frozen) function = .run at 0xffffff992b27a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 15 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = ',\x01©' alg = Cosine({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', ',\x01©', Cosine({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 5422970.708666968, result = None finish = 5422971.153618925, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=444952) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 444.95ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Cosine({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 15 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 444.95ms, which exceeds the deadline of 200.00ms'), "args = ('', ',\\x01©', Cosine({'qval...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right=',\x01©', alg=Cosine({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Cosine({'qval': 1, 'as_set': False, 'external': True}), E left='', E right=',\x01©', E ) E Unreliable test timings! On an initial run, this test took 444.95ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.21 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg14] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 51 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 51 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 51 bytes, frozen) function = .run at 0xffffff9dd51f80> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 51 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'M2', right = '¦\U000955c1\x12ùO¿Ì' alg = Cosine({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('M2', '¦\U000955c1\x12ùO¿Ì', Cosine({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 5422978.051168029, result = None finish = 5422978.425871946, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=374704) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 374.70ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Cosine({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 51 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 374.70ms, which exceeds the deadline of 200.00ms'), "args = ('M2', '¦\\U000955c1\\x12ùO¿Ì...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='M2', right='¦\U000955c1\x12ùO¿Ì', alg=Cosine({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Cosine({'qval': 1, 'as_set': False, 'external': True}), E left='M2', E right='¦\U000955c1\x12ùO¿Ì', E ) E Unreliable test timings! On an initial run, this test took 374.70ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.07 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg4] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 5 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 5 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) function = .run at 0xffffff7696d120> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '0' alg = Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})) kwargs = {}, initial_draws = 1, start = 5422989.275846194, result = None finish = 5422989.620657038, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=344811) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 344.81ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 344.81ms, which exceeds the deadline of 200.00ms'), "args = ('0', Jaro({'qval': 1, 'long_...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='0', alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}), E text='0', E ) E Unreliable test timings! On an initial run, this test took 344.81ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.24 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg15] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 59 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 59 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 59 bytes, frozen) function = .run at 0xffffff992b2d40> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 59 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
        args = self.stuff.args
        kwargs = dict(self.stuff.kwargs)
        ...
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '\x87x\x04`Õþ𨫆\x10Y\U000b89aa\x9d', right = '9'
alg = StrCmp95({'long_strings': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\x87x\x04`Õþ𨫆\x10Y\U000b89aa\x9d', '9', StrCmp95({'long_strings': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 5422997.100165949, result = None
finish = 5422997.622983617, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=522818)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 522.82ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = StrCmp95({'long_strings': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 59 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 522.82ms, which exceeds the deadline of 200.00ms'), "args = ('\\x87x\\x04`Õþ𨫆\\x10Y\\U000...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        """Run the test function once, using ``data`` as input. ..."""
        ...
        else:
            report("Failed to reproduce exception. Expected: \n" + traceback)
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\x87x\x04`Õþ𨫆\x10Y\U000b89aa\x9d', right='9', alg=StrCmp95({'long_strings': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=StrCmp95({'long_strings': False, 'external': True}),
E           left='\x87x\x04`Õþ𨫆\x10Y\U000b89aa\x9d',
E           right='9',
E       )
E       Unreliable test timings! On an initial run, this test took 522.82ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.30 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 22 bytes, frozen) function = .run at 0xffffff83694680> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 22 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '\U000c4090·V}' alg = Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '\U000c4090·V}', Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 5423002.669892343, result = None finish = 5423003.020737642, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=350845) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 350.85ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 22 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 350.85ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\U000c4090·V}', Jaro({...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='\U000c4090·V}', alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}), E left='', E right='\U000c4090·V}', E ) E Unreliable test timings! On an initial run, this test took 350.85ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.21 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg17] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 15 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 15 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 15 bytes, frozen) function = .run at 0xffffff803b6520> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 15 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '1\U00070c003', alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('1\U00070c003', MRA({'qval': 1, 'external': True})), kwargs = {} initial_draws = 1, start = 5423002.329239436, result = None finish = 5423002.622060106, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=292821) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 292.82ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 15 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 292.82ms, which exceeds the deadline of 200.00ms'), "args = ('1\\U00070c003', MRA({'qval'...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='1\U00070c003', alg=MRA({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=MRA({'qval': 1, 'external': True}), E text='1\U00070c003', E ) E Unreliable test timings! On an initial run, this test took 292.82ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.07 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg16] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 45 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 45 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
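A detail worth noting in the locals above: settings.deadline is 200 ms, yet current_deadline is datetime.timedelta(microseconds=250000). For non-final runs Hypothesis widens the deadline by 25 percent, (current_deadline // 4) * 5, so only examples slower than 250 ms raise during generation, while the final replay enforces the raw 200 ms. The arithmetic checks out directly:

    # Reproduces the current_deadline value shown in the locals above.
    from datetime import timedelta

    deadline = timedelta(milliseconds=200)   # settings.deadline
    current_deadline = (deadline // 4) * 5   # 25% slack on non-final runs
    assert current_deadline == timedelta(microseconds=250000)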
_______________________ test_normalization_by_one[alg16] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 45 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead. ..."""
        ...
>       result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 45 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        """Run the test function once, using ``data`` as input. ..."""
        ...
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 45 bytes, frozen)
function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffff9dc572e0>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 45 bytes, frozen)

    def run(data):
        # Set up dynamic context needed by a single test run.
        ...
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '\U000ffd3e4õ^4=', right = '𝜾t'
alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U000ffd3e4õ^4=', '𝜾t', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 2, start = 5423015.465488234, result = None
finish = 5423020.63075634, internal_draw_time = 0
runtime = datetime.timedelta(seconds=5, microseconds=165268)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 5165.27ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 45 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 5165.27ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000ffd3e4õ^4=', '𝜾t', M...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        """Run the test function once, using ``data`` as input. ..."""
        ...
        else:
            report("Failed to reproduce exception. Expected: \n" + traceback)
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U000ffd3e4õ^4=', right='𝜾t', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_by_one(
E           alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}),
E           left='\U000ffd3e4õ^4=',
E           right='𝜾t',
E       )
E       Unreliable test timings! On an initial run, this test took 5165.27ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.75 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 48 bytes, frozen) function = .run at 0xffffff836960c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 48 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\x10_\x83\x9bÆU\U0006d079\x92', right = '\U00106126' alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x10_\x83\x9bÆU\U0006d079\x92', '\U00106126', JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) kwargs = {}, initial_draws = 2, start = 5423022.541905269, result = None finish = 5423022.833026223, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=291121) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 291.12ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 48 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 291.12ms, which exceeds the deadline of 200.00ms'), "args = ('\\x10_\\x83\\x9bÆU\\U0006d0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. 
If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\x10_\x83\x9bÆU\U0006d079\x92', right='\U00106126', alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}), E left='\x10_\x83\x9bÆU\U0006d079\x92', E right='\U00106126', E ) E Unreliable test timings! On an initial run, this test took 291.12ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.46 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg7] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 94 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 94 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 94 bytes, frozen) function = .run at 0xffffff7693c220> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 94 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = 'a`\U00035b10!a¬>\U0003e3b5¸-\x87\U000511c4\x0bÃÜ\xa0\U00106b50' alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('a`\U00035b10!a¬>\U0003e3b5¸-\x87\U000511c4\x0bÃÜ\xa0\U00106b50', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 1, start = 5423027.776123203, result = None finish = 5423028.037636088, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=261513) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 261.51ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 94 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 261.51ms, which exceeds the deadline of 200.00ms'), "args = ('a`\\U00035b10!a¬>\\U0003e3b...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='a`\U00035b10!a¬>\U0003e3b5¸-\x87\U000511c4\x0bÃÜ\xa0\U00106b50', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E text='a`\U00035b10!a¬>\U0003e3b5¸-\x87\U000511c4\x0bÃÜ\xa0\U00106b50', E ) E Unreliable test timings! On an initial run, this test took 261.51ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 16.25 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg18] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 28 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 28 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 28 bytes, frozen) function = .run at 0xffffff802a89a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 28 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = 'öö\x07èø\xad' alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('öö\x07èø\xad', Prefix({'qval': 1, 'sim_test': })) kwargs = {}, initial_draws = 1, start = 5423034.619567013, result = None finish = 5423035.029554752, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=409988) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 409.99ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 28 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 409.99ms, which exceeds the deadline of 200.00ms'), "args = ('öö\\x07èø\\xad', Prefix({'q...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='öö\x07èø\xad', alg=Prefix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Prefix({'qval': 1, 'sim_test': }), E text='öö\x07èø\xad', E ) E Unreliable test timings! On an initial run, this test took 409.99ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.90 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg6] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 26 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 26 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 26 bytes, frozen) function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffff9808b6a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 26 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function.
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U0004f144ដ', right = '\xa0Ø' alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0004f144ដ', '\xa0Ø', MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) kwargs = {}, initial_draws = 2, start = 5423039.979635895, result = None finish = 5423041.036993337, internal_draw_time = 0 runtime = datetime.timedelta(seconds=1, microseconds=57357) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 1057.36ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 26 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 1057.36ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0004f144ដ', '\\xa0Ø', M...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U0004f144ដ', right='\xa0Ø', alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}), E left='\U0004f144ដ', E right='\xa0Ø', E ) E Unreliable test timings! On an initial run, this test took 1057.36ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.82 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg17] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 28 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 28 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 28 bytes, frozen) function = .run at 0xffffff9df23420> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 28 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '*', right = '\x9b\x8d\U00057863ó' alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('*', '\x9b\x8d\U00057863ó', MRA({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 5423038.040002606, result = None finish = 5423038.423709705, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=383707) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 383.71ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 28 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 383.71ms, which exceeds the deadline of 200.00ms'), "args = ('*', '\\x9b\\x8d\\U00057863ó...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='*', right='\x9b\x8d\U00057863ó', alg=MRA({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=MRA({'qval': 1, 'external': True}), E left='*', E right='\x9b\x8d\U00057863ó', E ) E Unreliable test timings! On an initial run, this test took 383.71ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.04 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg8] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 32 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 32 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 32 bytes, frozen) function = .run at 0xffffff7693efc0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 32 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = "\U000b99f4销\U000f0f01\U000b99f4''" alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ("\U000b99f4销\U000f0f01\U000b99f4''", LCSStr({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 1, start = 5423041.150628574, result = None finish = 5423041.421903148, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=271275) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 271.27ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 32 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 271.27ms, which exceeds the deadline of 200.00ms'), 'args = ("\\U000b99f4销\\U000f0f01\\U0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text="\U000b99f4销\U000f0f01\U000b99f4''", alg=LCSStr({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=LCSStr({'qval': 1, 'external': True}), E text="\U000b99f4销\U000f0f01\U000b99f4''", E ) E Unreliable test timings! On an initial run, this test took 271.27ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.85 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg7] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('_\x13q\x13½>\x05Sâ©ó\U00012761\x04ÕÏä쟷ÿô\U000a5ebb\U0003e2c4薰92', '_\x13q\x13½>\x05Sâ©ó\U00012761\x04ÕÏä쟷ÿô\U000a5ebb\U0003e2c4薰92', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 5423064.509300797, result = None finish = 5423064.841147523, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=331847) current_deadline = timedelta(milliseconds=200) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 331.85ms, which exceeds the deadline of 200.00ms E Falsifying example: test_normalization_range( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E left='_\x13q\x13½>\x05Sâ©ó\U00012761\x04ÕÏä쟷ÿô\U000a5ebb\U0003e2c4薰92', E right='_\x13q\x13½>\x05Sâ©ó\U00012761\x04ÕÏä쟷ÿô\U000a5ebb\U0003e2c4薰92', E ) /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded ________________________ test_normalization_range[alg8] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.00 seconds (0 invalid ones and 1 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. 
tests/test_common.py:50: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(124814349285756663799238082329220337852) to this test or run pytest with --hypothesis-seed=124814349285756663799238082329220337852 to reproduce this failure. _______________________ test_normalization_by_one[alg7] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 102 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 102 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 102 bytes, frozen) function = .run at 0xffffff9820a520> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 102 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = ':𰦯]Ú𬋅é\x92¦\U0008bb90', right = '\x18\x08\x1b±ð𤽕\x83\x97\x89\U0004d0f7' alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (':𰦯]Ú𬋅é\x92¦\U0008bb90', '\x18\x08\x1b±ð𤽕\x83\x97\x89\U0004d0f7', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 5423072.868327328, result = None finish = 5423073.635600025, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=767273) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 767.27ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 102 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 767.27ms, which exceeds the deadline of 200.00ms'), "args = (':𰦯]Ú𬋅é\\x92¦\\U0008bb90', '...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left=':𰦯]Ú𬋅é\x92¦\U0008bb90', right='\x18\x08\x1b±ð𤽕\x83\x97\x89\U0004d0f7', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E left=':𰦯]Ú𬋅é\x92¦\U0008bb90', E right='\x18\x08\x1b±ð𤽕\x83\x97\x89\U0004d0f7', E ) E Unreliable test timings! On an initial run, this test took 767.27ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 4.09 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg10] ________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 183 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 183 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 183 bytes, frozen) function = .run at 0xffffff768c2480> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 183 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = "^}曙\x0b\U000c44f4𲁃'\x15Ç\U000a38feù\x19\U000d6ab9D\x8b¨à 9R6c" alg = Jaccard({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ("^}曙\x0b\U000c44f4𲁃'\x15Ç\U000a38feù\x19\U000d6ab9D\x8b¨à 9R6c", Jaccard({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 1, start = 5423076.158649035, result = None finish = 5423076.421027728, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=262379) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 262.38ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Jaccard({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 183 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 262.38ms, which exceeds the deadline of 200.00ms'), 'args = ("^}曙\\x0b\\U000c44f4𲁃\'\\x15...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text="^}曙\x0b\U000c44f4𲁃'\x15Ç\U000a38feù\x19\U000d6ab9D\x8b¨à 9R6c", alg=Jaccard({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Jaccard({'qval': 1, 'as_set': False, 'external': True}), E text="^}曙\x0b\U000c44f4𲁃'\x15Ç\U000a38feù\x19\U000d6ab9D\x8b¨à 9R6c", E ) E Unreliable test timings! On an initial run, this test took 262.38ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.25 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg9] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 24 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 24 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 24 bytes, frozen) function = .run at 0xffffff86676de0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 24 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'Õ', right = '\x02~\x80\x08' alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('Õ', '\x02~\x80\x08', RatcliffObershelp({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 5423076.758376304, result = None finish = 5423077.091818245, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=333442) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 333.44ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 24 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 333.44ms, which exceeds the deadline of 200.00ms'), "args = ('Õ', '\\x02~\\x80\\x08', Rat...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='Õ', right='\x02~\x80\x08', alg=RatcliffObershelp({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=RatcliffObershelp({'qval': 1, 'external': True}), E left='Õ', E right='\x02~\x80\x08', E ) E Unreliable test timings! On an initial run, this test took 333.44ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.77 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg21] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 15 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 15 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 15 bytes, frozen) function = .run at 0xffffff6be4e700> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 15 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\U000df2ec\x84\x82' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000df2ec\x84\x82', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 1, start = 5423086.087773585, result = None finish = 5423086.424263053, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=336489) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 336.49ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 15 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 336.49ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000df2ec\\x84\\x82', Nee...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U000df2ec\x84\x82', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E text='\U000df2ec\x84\x82', E ) E Unreliable test timings! On an initial run, this test took 336.49ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.32 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg11] ________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 98 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 98 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 98 bytes, frozen) function = .run at 0xffffff766da520> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 98 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\U0003a66f\U0003a66fN\x84\x13\x8dh\U0004d989\x0e\x12\x16=\x0b\U000d32a6Tªý¢k' alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0003a66f\U0003a66fN\x84\x13\x8dh\U0004d989\x0e\x12\x16=\x0b\U000d32a6Tªý¢k', Sorensen({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 1, start = 5423097.728833147, result = None finish = 5423098.023479934, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=294647) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 294.65ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 98 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 294.65ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0003a66f\\U0003a66fN\\x8...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U0003a66f\U0003a66fN\x84\x13\x8dh\U0004d989\x0e\x12\x16=\x0b\U000d32a6Tªý¢k', alg=Sorensen({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Sorensen({'qval': 1, 'as_set': False, 'external': True}), E text='\U0003a66f\U0003a66fN\x84\x13\x8dh\U0004d989\x0e\x12\x16=\x0b\U000d32a6Tªý¢k', E ) E Unreliable test timings! On an initial run, this test took 294.65ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.21 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg21] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 252 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 252 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 252 bytes, frozen) function = .run at 0xffffff9da7dee0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 252 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\x90=í𨋄%\x1dvebt\x9d·¼\x9fÁPq\x91' right = 'Ó\x06³=?Ò¸ç\x98á\x93\uf1e6gGª\x11p:\x03ûN²iI𬽙\U000c6369\U00041c94\U0006c239hü' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x90=í𨋄%\x1dvebt\x9d·¼\x9fÁPq\x91', 'Ó\x06³=?Ò¸ç\x98á\x93\uf1e6gGª\x11p:\x03ûN²iI𬽙\U000c6369\U00041c94\U0006c239hü', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 5423108.292452784, result = None finish = 5423108.637108927, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=344656) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 344.66ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 252 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 344.66ms, which exceeds the deadline of 200.00ms'), "args = ('\\x90=í𨋄%\\x1dvebt\\x9d·¼\\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test 
function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\x90=í𨋄%\x1dvebt\x9d·¼\x9fÁPq\x91', right='Ó\x06³=?Ò¸ç\x98á\x93\uf1e6gGª\x11p:\x03ûN²iI𬽙\U000c6369\U00041c94\U0006c239hü', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='\x90=í𨋄%\x1dvebt\x9d·¼\x9fÁPq\x91', E right='Ó\x06³=?Ò¸ç\x98á\x93\uf1e6gGª\x11p:\x03ûN²iI𬽙\U000c6369\U00041c94\U0006c239hü', E ) E Unreliable test timings! On an initial run, this test took 344.66ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 35.88 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg11] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 68 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 68 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 68 bytes, frozen) function = .run at 0xffffff85dac400> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 68 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '!¾E\x83£\x156nÆ\t\\\x8d\x96', right = 'ó' alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('!¾E\x83£\x156nÆ\t\\\x8d\x96', 'ó', Sorensen({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 5423116.528714096, result = None finish = 5423116.825310701, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=296597) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 296.60ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 68 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 296.60ms, which exceeds the deadline of 200.00ms'), "args = ('!¾E\\x83£\\x156nÆ\\t\\\\\\x...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
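Note that the wrapper charges only the test body against the deadline: draw time recorded by the ConjectureData object is subtracted from the wall-clock measurement. A standalone sketch of that accounting, where `draw_times` is a hypothetical stand-in for Hypothesis's `data.draw_times`:

    # Sketch of the runtime accounting in the wrapper above.
    import time
    from datetime import timedelta

    def timed_call(fn, draw_times):
        initial = len(draw_times)
        start = time.perf_counter()
        result = fn()
        finish = time.perf_counter()
        internal = sum(draw_times[initial:])   # draws made during the call
        return result, timedelta(seconds=finish - start - internal)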
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='!¾E\x83£\x156nÆ\t\\\x8d\x96', right='ó', alg=Sorensen({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Sorensen({'qval': 1, 'as_set': False, 'external': True}), E left='!¾E\x83£\x156nÆ\t\\\x8d\x96', E right='ó', E ) E Unreliable test timings! On an initial run, this test took 296.60ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg22] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 111 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 111 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 111 bytes, frozen) function = .run at 0xffffff9d5f0900> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 111 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '-\U0008780añ\x0b\x13\x81\x1dHÃ\U000f2e3a1\U000a3af5&;Oiª' right = '\U00048716¿\x85L' alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('-\U0008780añ\x0b\x13\x81\x1dHÃ\U000f2e3a1\U000a3af5&;Oiª', '\U00048716¿\x85L', SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 5423122.547780688, result = None finish = 5423123.025474144, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=477693) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 477.69ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 111 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 477.69ms, which exceeds the deadline of 200.00ms'), "args = ('-\\U0008780añ\\x0b\\x13\\x8...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. 
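The Flaky error closing each of these blocks comes from the `expected_failure` branch shown above: the shrunk example is replayed once with `is_final=True`, and if the recorded exception does not recur, a Flaky is chained onto it with `raise ... from`. Reduced to a skeleton (simplified; these are not Hypothesis's actual signatures):

    # Simplified skeleton of the expected-failure replay; not the real API.
    from hypothesis.errors import Flaky

    def replay_final(test, args, recorded_exc):
        try:
            test(*args)                    # final replay of the shrunk example
        except type(recorded_exc):
            return                         # failure reproduced: report it normally
        raise Flaky(
            "Falsified on the first call but did not on a subsequent one"
        ) from recorded_exc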
If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='-\U0008780añ\x0b\x13\x81\x1dHÃ\U000f2e3a1\U000a3af5&;Oiª', right='\U00048716¿\x85L', alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='-\U0008780añ\x0b\x13\x81\x1dHÃ\U000f2e3a1\U000a3af5&;Oiª', E right='\U00048716¿\x85L', E ) E Unreliable test timings! On an initial run, this test took 477.69ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.71 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg16] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 16 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 16 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) function = .run at 0xffffff991b2fc0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000194cb1', right = '3' alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000194cb1', '3', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 5423126.748026492, result = None finish = 5423127.021404985, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=273378) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 273.38ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 16 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 273.38ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000194cb1', '3', MongeEl...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, 
expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U000194cb1', right='3', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}), E left='\U000194cb1', E right='3', E ) E Unreliable test timings! On an initial run, this test took 273.38ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.29 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg3] ______________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 25 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 25 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 25 bytes, frozen) function = .run at 0xffffff6be4c860> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 25 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '̵ãÌÒ', right = '', alg = RLENCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('̵ãÌÒ', '', RLENCD({'qval': 1})), kwargs = {}, initial_draws = 2 start = 5423142.107157662, result = None, finish = 5423142.406392091 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=299234) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 299.23ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = RLENCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 25 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 299.23ms, which exceeds the deadline of 200.00ms'), "args = ('̵ãÌÒ', '', RLENCD({'qval':...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
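`@proxies(self.test)` in the wrapper is Hypothesis's internal analogue of functools.wraps: the timing shim has to masquerade as the user's test so the reported names (`test_simmetry(...)` above) stay correct. A rough stand-in under that assumption (the real helper also preserves the signature):

    # Rough stand-in for @proxies; the real helper lives in
    # hypothesis.internal.reflection.
    import functools

    def proxies(target):
        def decorator(wrapper):
            return functools.wraps(target)(wrapper)
        return decorator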
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='̵ãÌÒ', right='', alg=RLENCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=RLENCD({'qval': 1}), E left='̵ãÌÒ', E right='', E ) E Unreliable test timings! 
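Every failure in this log follows the pattern above: a property-based test exceeded Hypothesis's 200 ms deadline on the first run but not on the re-run, so Hypothesis raises Flaky instead of reporting a real counterexample. A minimal sketch of the per-test remedy the error message itself suggests, deadline=None; the test body and strategies below are illustrative stand-ins, not the packaged textdistance test code:

    import hypothesis
    from hypothesis import strategies as st

    @hypothesis.settings(deadline=None)  # opt out of the 200 ms per-example deadline
    @hypothesis.given(left=st.text(), right=st.text())
    def test_simmetry(left, right):
        # Stand-in body for the real symmetry check over a distance algorithm.
        assert isinstance(left, str) and isinstance(right, str)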
_______________________ test_normalization_by_one[alg23] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 100 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

[... elided: execute_once()/default_new_style_executor()/run() frames, identical to the listings above ...]

left = '9P', right = 'Eè\U000d61cc{´Ô\U000e15f7½í\U000d8140A}:'
alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'S', 'B', 'O', 'D', 'G', 'N', 'Q', 'V', 'A', 'E', 'Y', 'P', 'J', 'I', 'X', 'M', 'K', 'L', 'U', 'R', 'F', 'Z', 'T', 'C'})})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('9P', 'Eè\U000d61cc{´Ô\U000e15f7½í\U000d8140A}:', Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'loca...
'B', 'O', 'D', 'G', 'N', 'Q', 'V', 'A', 'E', 'Y', 'P', 'J', 'I', 'X', 'M', 'K', 'L', 'U', 'R', 'F', 'Z', 'T', 'C'})}))
kwargs = {}, initial_draws = 2, start = 5423143.717294046, result = None
finish = 5423143.969628047, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=252334)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... elided: deadline-checking wrapper as in the listing above ...]
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 252.33ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

[... elided: final execute_once() frame, identical to the listing above ...]

E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='9P', right='Eè\U000d61cc{´Ô\U000e15f7½í\U000d8140A}:', alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'S', 'B', 'O', 'D', 'G', 'N', 'Q', 'V', 'A', 'E', 'Y', 'P', 'J', 'I', 'X', 'M', 'K', 'L', 'U', 'R', 'F', 'Z', 'T', 'C'})})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'S', 'B', 'O', 'D', 'G', 'N', 'Q', 'V', 'A', 'E', 'Y', 'P', 'J', 'I', 'X', 'M', 'K', 'L', 'U', 'R', 'F', 'Z', 'T', 'C'})}),
E               left='9P',
E               right='Eè\U000d61cc{´Ô\U000e15f7½í\U000d8140A}:',
E           )
E           Unreliable test timings! On an initial run, this test took 252.33ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 10.58 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
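Note the locals above: the error reports a 200.00 ms deadline, yet current_deadline is timedelta(microseconds=250000). That matches the wrapper source quoted in these tracebacks, which grants a 25% grace margin on non-final runs. A small worked check of that arithmetic:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)   # settings.deadline
    current_deadline = (deadline // 4) * 5            # non-final runs: 250 ms
    assert current_deadline == datetime.timedelta(microseconds=250000)

    # e.g. the alg23 failure above: 252.334 ms >= 250 ms, so DeadlineExceeded
    # is raised, while the message still reports the configured 200 ms deadline.
    runtime = datetime.timedelta(microseconds=252334)
    assert runtime >= current_deadline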
_______________________ test_normalization_by_one[alg11] _______________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 137 bytes, frozen)

[... elided: _execute_once_for_engine() frame raising at /usr/lib/python3.12/site-packages/hypothesis/core.py:850, then execute_once()/default_new_style_executor()/run() frames, identical to the listings above ...]

left = 's\x0eÔ戴\x1aû\x8fÝ\x12\x9b\x0búµ×'
right = '@ö¥Ç÷ýv\x03P\x7fnú\U000ebb2d'
alg = Sorensen({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('s\x0eÔ戴\x1aû\x8fÝ\x12\x9b\x0búµ×', '@ö¥Ç÷ýv\x03P\x7fnú\U000ebb2d', Sorensen({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 5423151.679788961, result = None
finish = 5423151.991209901, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=311421)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... elided: deadline-checking wrapper as in the listing above ...]
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 311.42ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

[... elided: final execute_once() frame, identical to the listing above ...]

>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='s\x0eÔ戴\x1aû\x8fÝ\x12\x9b\x0búµ×', right='@ö¥Ç÷ýv\x03P\x7fnú\U000ebb2d', alg=Sorensen({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=Sorensen({'qval': 1, 'as_set': False, 'external': True}),
E               left='s\x0eÔ戴\x1aû\x8fÝ\x12\x9b\x0búµ×',
E               right='@ö¥Ç÷ýv\x03P\x7fnú\U000ebb2d',
E           )
E           Unreliable test timings! On an initial run, this test took 311.42ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
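Rather than editing each test, a build environment can relax deadlines suite-wide with Hypothesis's settings profiles. A sketch of a conftest.py along those lines; the profile name "package-build" is an arbitrary choice for illustration, not anything present in this build:

    import os
    from hypothesis import settings

    settings.register_profile("package-build", deadline=None)
    settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "package-build"))

With this in place, HYPOTHESIS_PROFILE could select a stricter profile during upstream development while a package build defaults to no deadline.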
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='s\x0eÔ戴\x1aû\x8fÝ\x12\x9b\x0búµ×', right='@ö¥Ç÷ýv\x03P\x7fnú\U000ebb2d', alg=Sorensen({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Sorensen({'qval': 1, 'as_set': False, 'external': True}), E left='s\x0eÔ戴\x1aû\x8fÝ\x12\x9b\x0búµ×', E right='@ö¥Ç÷ýv\x03P\x7fnú\U000ebb2d', E ) E Unreliable test timings! On an initial run, this test took 311.42ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg17] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 32 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 32 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 32 bytes, frozen) function = .run at 0xffffff98fb4e00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 32 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\x8c\x14', right = 'E¬𠬇', alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x8c\x14', 'E¬𠬇', MRA({'qval': 1, 'external': True})), kwargs = {} initial_draws = 2, start = 5423158.948206547, result = None finish = 5423159.221705141, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=273499) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 273.50ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 32 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 273.50ms, which exceeds the deadline of 200.00ms'), "args = ('\\x8c\\x14', 'E¬𠬇', MRA({'q...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
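The wrapper quoted throughout these tracebacks charges only the test body against the deadline, subtracting time spent drawing inputs mid-test. A standalone sketch of the same bookkeeping, with a plain draw_times list standing in for Hypothesis's data.draw_times:

    import datetime
    import time

    draw_times = [0.004, 0.003]           # seconds spent generating inputs so far
    initial_draws = len(draw_times)

    start = time.perf_counter()
    time.sleep(0.01)                      # stand-in for the test body
    draw_times.append(0.002)              # a draw made *during* the test
    finish = time.perf_counter()

    # Only draws made after `start` count against internal_draw_time.
    internal_draw_time = sum(draw_times[initial_draws:])   # -> 0.002
    runtime = datetime.timedelta(seconds=finish - start - internal_draw_time)
    assert runtime < datetime.timedelta(milliseconds=200)  # within the default deadline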
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\x8c\x14', right='E¬𠬇', alg=MRA({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=MRA({'qval': 1, 'external': True}), E left='\x8c\x14', E right='E¬𠬇', E ) E Unreliable test timings! On an initial run, this test took 273.50ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.50 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg18] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 28 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 28 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 28 bytes, frozen) function = .run at 0xffffff98fb6a20> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 28 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\x97\x1d\U000878b9\x8aô', right = '' alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x97\x1d\U000878b9\x8aô', '', Prefix({'qval': 1, 'sim_test': })) kwargs = {}, initial_draws = 2, start = 5423172.129819958, result = None finish = 5423172.625508779, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=495689) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 495.69ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Prefix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 28 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 495.69ms, which exceeds the deadline of 200.00ms'), "args = ('\\x97\\x1d\\U000878b9\\x8aô...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
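The "direct cause" banner separating each pair of tracebacks is PEP 3134 exception chaining: Hypothesis re-raises Flaky from the original DeadlineExceeded, as in the raise ... from exception line quoted above. A self-contained sketch with stand-in exception classes (not the real hypothesis.errors types):

    class DeadlineExceeded(Exception):
        pass

    class Flaky(Exception):
        pass

    try:
        try:
            raise DeadlineExceeded("Test took 495.69ms")
        except DeadlineExceeded as exception:
            raise Flaky("produces unreliable results") from exception
    except Flaky as err:
        # __cause__ carries the original error; tracebacks print it first,
        # then "The above exception was the direct cause of the following
        # exception:" before the chained one.
        assert isinstance(err.__cause__, DeadlineExceeded)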
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\x97\x1d\U000878b9\x8aô', right='', alg=Prefix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Prefix({'qval': 1, 'sim_test': }), E left='\x97\x1d\U000878b9\x8aô', E right='', E ) E Unreliable test timings! On an initial run, this test took 495.69ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.30 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg4] ___________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 61 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 61 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 61 bytes, frozen) function = .run at 0xffffff82fc65c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 61 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'I\U0005e69f¶\U000900c8[*\U000ea4fb"\x19\x02', right = '' alg = ZLIBNCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('I\U0005e69f¶\U000900c8[*\U000ea4fb"\x19\x02', '', ZLIBNCD({})) kwargs = {}, initial_draws = 2, start = 5423170.093146585, result = None finish = 5423170.423005893, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=329859) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 329.86ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ZLIBNCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 61 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 329.86ms, which exceeds the deadline of 200.00ms'), 'args = (\'I\\U0005e69f¶\\U000900c8[*...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='I\U0005e69f¶\U000900c8[*\U000ea4fb"\x19\x02', right='', alg=ZLIBNCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_is_normalized( E alg=ZLIBNCD({}), E left='I\U0005e69f¶\U000900c8[*\U000ea4fb"\x19\x02', E right='', E ) E Unreliable test timings! On an initial run, this test took 329.86ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.61 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ______________________________ test_qval[3-Jaro] _______________________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 39 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 39 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 39 bytes, frozen) function = .run at 0xffffff9d5f1e40> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 39 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'y', right = 'ßMÏ\x9e\U000c3063a', alg = 'Jaro', qval = 3 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('y', 'ßMÏ\x9e\U000c3063a', 'Jaro', 3), kwargs = {}, initial_draws = 2 start = 5423172.768393682, result = None, finish = 5423173.041260471 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=272867) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 272.87ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'Jaro', qval = 3 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 39 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 272.87ms, which exceeds the deadline of 200.00ms'), "args = ('y', 'ßMÏ\\x9e\\U000c3063a',...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_qval(left='y', right='ßMÏ\x9e\U000c3063a', alg='Jaro', qval=3) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_qval( E alg='Jaro', E qval=3, E left='y', E right='ßMÏ\x9e\U000c3063a', E ) E Unreliable test timings! On an initial run, this test took 272.87ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.48 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_qval[3-JaroWinkler] ___________________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 alg = 'JaroWinkler', qval = 3 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.21 seconds (0 invalid ones and 3 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test_external.py:51: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(223125797972860819972347860590429951150) to this test or run pytest with --hypothesis-seed=223125797972860819972347860590429951150 to reproduce this failure. _________________________ test_qval[None-JaroWinkler] __________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 9 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 9 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
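The FailedHealthCheck above names its own remedy: add HealthCheck.too_slow to suppress_health_check. A hedged per-test sketch using documented Hypothesis settings; the test shown is a stand-in for the real parametrized test_qval:

    # Sketch: suppressing only the too_slow health check for one test,
    # as the error message suggests. The body is a hypothetical stand-in.
    import hypothesis
    from hypothesis import HealthCheck, strategies

    @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
    @hypothesis.given(left=strategies.text(), right=strategies.text())
    def test_qval(left, right):
        ...
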
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 9 bytes, frozen) function = .run at 0xffffff76ba77e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 9 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '0', right = '0', alg = 'JaroWinkler', qval = None @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '0', 'JaroWinkler', None), kwargs = {}, initial_draws = 2 start = 5423182.351583877, result = None, finish = 5423182.622403347 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=270819) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 270.82ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'JaroWinkler', qval = None @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 9 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 270.82ms, which exceeds the deadline of 200.00ms'), "args = ('0', '0', 'JaroWinkler', Non...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_qval(left='0', right='0', alg='JaroWinkler', qval=None) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_qval( E alg='JaroWinkler', E qval=None, E left='0', E right='0', E ) E Unreliable test timings! On an initial run, this test took 270.82ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.42 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky __________________________ test_list_of_numbers[Jaro] __________________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 alg = 'Jaro' @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.19 seconds (0 invalid ones and 3 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test_external.py:91: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(250667810856351186475017291033584858522) to this test or run pytest with --hypothesis-seed=250667810856351186475017291033584858522 to reproduce this failure. ________________________ test_distributivity_compressor ________________________ [gw6] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 13 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 13 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
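Both health-check failures also print a reproduction recipe. A sketch of the decorator form follows; the strategy is a guess at what test_list_of_numbers draws, so treat everything except the @seed value as hypothetical. The command-line form, pytest --hypothesis-seed=&lt;value&gt;, needs no code change at all.

    # Sketch: pinning the failing data generation with the printed seed.
    # The strategy is a hypothetical stand-in; only @seed comes from the log.
    import hypothesis
    from hypothesis import strategies

    @hypothesis.seed(250667810856351186475017291033584858522)
    @hypothesis.given(numbers=strategies.lists(strategies.integers()))
    def test_list_of_numbers(numbers):
        ...
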
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 13 bytes, frozen) function = .run at 0xffffff8a8177e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 13 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left1 = '0', left2 = '0', right = '0' @hypothesis.given( > left1=hypothesis.strategies.text(min_size=1), left2=hypothesis.strategies.text(min_size=1), right=hypothesis.strategies.text(min_size=1), ) tests/test_compression/test_sqrt_ncd.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '0', '0'), kwargs = {}, initial_draws = 3 start = 5423180.328568428, result = None, finish = 5423180.622219006 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=293651) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 293.65ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given( > left1=hypothesis.strategies.text(min_size=1), left2=hypothesis.strategies.text(min_size=1), right=hypothesis.strategies.text(min_size=1), ) tests/test_compression/test_sqrt_ncd.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 13 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 293.65ms, which exceeds the deadline of 200.00ms'), "args = ('0', '0', '0'), kwargs = {},...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_distributivity_compressor(left1='0', left2='0', right='0') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_distributivity_compressor( E left1='0', E left2='0', E right='0', E ) E Unreliable test timings! 
On an initial run, this test took 293.65ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.47 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_simmetry_compressor ___________________________ [gw7] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 56 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 56 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
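Every deadline failure in this log follows the same pattern: a slow first call followed by a sub-millisecond rerun, which points at one-time warm-up cost rather than the algorithm itself. A hedged way to check that outside Hypothesis is to time a logged falsifying input directly; this assumes textdistance exposes SqrtNCD with a normalized_distance() method, which the test module name suggests but this log does not confirm:

    # Hedged sketch: time one logged falsifying example without Hypothesis.
    # textdistance.SqrtNCD and normalized_distance() are assumptions taken
    # from the test module name (test_sqrt_ncd.py), not shown in this log.
    import time
    import textdistance

    alg = textdistance.SqrtNCD()
    for attempt in range(2):
        start = time.perf_counter()
        d = alg.normalized_distance('0', '0')   # inputs from the example above
        print(attempt, d, time.perf_counter() - start)
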
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 56 bytes, frozen) function = .run at 0xffffff941602c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 56 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = 'R{\x05\U000df3ae3\U000a776c}×Ë' @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_simmetry_compressor(text): tests/test_compression/test_entropy_ncd.py:26: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('R{\x05\U000df3ae3\U000a776c}×Ë',), kwargs = {}, initial_draws = 1 start = 5423179.468335683, result = None, finish = 5423180.290883385 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=822548) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 822.55ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_simmetry_compressor(text): tests/test_compression/test_entropy_ncd.py:26: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 56 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 822.55ms, which exceeds the deadline of 200.00ms'), "args = ('R{\\x05\\U000df3ae3\\U000a7...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry_compressor(text='R{\x05\U000df3ae3\U000a776c}×Ë') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry_compressor( E text='R{\x05\U000df3ae3\U000a776c}×Ë', E ) E Unreliable test timings! On an initial run, this test took 822.55ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.45 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg5] ___________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 64 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 64 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
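The Flaky report above closes with Hypothesis's own advice: when timings vary this much, turn the per-example deadline off. A minimal sketch of applying that advice suite-wide from a conftest.py follows; the profile name "rpmbuild" and the conftest placement are assumptions, not taken from this log.

    import hypothesis

    # Loaded build hosts often blow the default 200ms budget on the first run
    # and then pass the replay, which Hypothesis reports as Flaky.
    # A settings profile with deadline=None disables the per-example deadline check.
    hypothesis.settings.register_profile("rpmbuild", deadline=None)
    hypothesis.settings.load_profile("rpmbuild")

Dropped into the test suite's conftest.py, this would affect every @given test at collection time.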
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 64 bytes, frozen) function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffff83254220> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 64 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function.
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '¬\U000d9202k0àÈ\U000d430dbϦ', alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '¬\U000d9202k0àÈ\U000d430dbϦ', SqrtNCD({'qval': 1})), kwargs = {} initial_draws = 2, start = 5423191.319532662, result = None finish = 5423191.627153367, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=307621) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 307.62ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 64 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 307.62ms, which exceeds the deadline of 200.00ms'), "args = ('', '¬\\U000d9202k0àÈ\\U000d...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='¬\U000d9202k0àÈ\U000d430dbϦ', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_is_normalized( E alg=SqrtNCD({'qval': 1}), E left='', E right='¬\U000d9202k0àÈ\U000d430dbϦ', E ) E Unreliable test timings! On an initial run, this test took 307.62ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.65 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg0] ______________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 5 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 5 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) function = .run at 0xffffff85dac040> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '1' alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '1', ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) kwargs = {}, initial_draws = 2, start = 5423199.657785104, result = None finish = 5423200.624590821, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=966806) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 966.81ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 966.81ms, which exceeds the deadline of 200.00ms'), "args = ('', '1', ArithNCD({'base': 2...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='1', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}), E left='', E right='1', E ) E Unreliable test timings! On an initial run, this test took 966.81ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 14.64 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg5] ______________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 20 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 20 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 20 bytes, frozen) function = .run at 0xffffff804576a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 20 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = 'û\x0b\x89Ë', alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', 'û\x0b\x89Ë', SqrtNCD({'qval': 1})), kwargs = {}, initial_draws = 2 start = 5423192.098006861, result = None, finish = 5423192.612644155 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=514637) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 514.64ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 20 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 514.64ms, which exceeds the deadline of 200.00ms'), "args = ('', 'û\\x0b\\x89Ë', SqrtNCD(...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='û\x0b\x89Ë', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=SqrtNCD({'qval': 1}), E left='', E right='û\x0b\x89Ë', E ) E Unreliable test timings! 
On an initial run, this test took 514.64ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.97 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg1] ______________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '', BWTRLENCD({'terminator': '\x00'})), kwargs = {} initial_draws = 2, start = 5423214.376182231, result = None finish = 5423214.924140828, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=547959) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 547.96ms, which exceeds the deadline of 200.00ms E Falsifying example: test_simmetry( E alg=BWTRLENCD({'terminator': '\x00'}), E left='0', E right='', E ) /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded ______________________ test_list_of_numbers[JaroWinkler] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 41 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 41 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input.
If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 41 bytes, frozen) function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffff9da7cc20> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 41 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function.
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = [], right = [94, 6225, -92, -1530318293, 835, 73], alg = 'JaroWinkler' @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:91: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ([], [94, 6225, -92, -1530318293, 835, 73], 'JaroWinkler'), kwargs = {} initial_draws = 2, start = 5423210.349933313, result = None finish = 5423210.693491046, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=343558) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 343.56ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'JaroWinkler' @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:91: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 41 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 343.56ms, which exceeds the deadline of 200.00ms'), "args = ([], [94, 6225, -92, -1530318...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
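Note how the wrapper charges the test only for time spent in its body: draw times recorded after the call begins (internal_draw_time) are subtracted before the deadline comparison, which is why internal_draw_time = 0 appears alongside each runtime above. A self-contained sketch of the same computation with stand-in numbers:

    import datetime
    import time

    draw_times = [0.004, 0.003]   # stand-in for data.draw_times[initial_draws:]
    start = time.perf_counter()
    time.sleep(0.01)              # stand-in for the test body
    finish = time.perf_counter()
    internal_draw_time = sum(draw_times)
    runtime = datetime.timedelta(seconds=finish - start - internal_draw_time)
    print(runtime)                # wall time minus data-generation time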
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_list_of_numbers(left=[], right=[94, 6225, -92, -1530318293, 835, 73], alg='JaroWinkler') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_list_of_numbers( E alg='JaroWinkler', E left=[], E right=[94, 6225, -92, -1530318293, 835, 73], E ) E Unreliable test timings! On an initial run, this test took 343.56ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.29 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg2] ______________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffff85ee1e40> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function.
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '', alg = BZ2NCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '', BZ2NCD({})), kwargs = {}, initial_draws = 2 start = 5423218.658505884, result = None, finish = 5423218.956957806 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=298452) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 298.45ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = BZ2NCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 298.45ms, which exceeds the deadline of 200.00ms'), "args = ('', '', BZ2NCD({})), kwargs ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
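A narrower alternative to a global profile is to relax the deadline on just the affected tests with the settings decorator, which would cover borderline cases like the BZ2NCD run above (298.45ms against the 200ms limit). The test name and body below are illustrative stand-ins:

    import hypothesis
    import hypothesis.strategies as st

    @hypothesis.settings(deadline=None)  # or a higher limit, e.g. deadline=1000 (ms)
    @hypothesis.given(left=st.text(), right=st.text())
    def test_simmetry_relaxed(left, right):
        # Stand-in body; the real test compares compressor distances both ways.
        assert isinstance(left, str) and isinstance(right, str)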
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='', alg=BZ2NCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=BZ2NCD({}), E left='', E right='', E ) E Unreliable test timings! 
On an initial run, this test took 298.45ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 183.45 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________________ test_simmetry[alg6] ______________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 68 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 68 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 68 bytes, frozen) function = .run at 0xffffff80133240> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 68 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
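
# The "Unreliable test timings" advice above suggests setting deadline=None.
# A minimal sketch of doing that for one property-based test; the test name
# and body are placeholders, not code from this package:
import hypothesis
import hypothesis.strategies as st

@hypothesis.settings(deadline=None)
@hypothesis.given(left=st.text(), right=st.text())
def test_without_deadline(left, right):
    assert isinstance(left + right, str)  # stands in for a real property
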
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '-\x13\x04©b\x18Ó\x84ÆPߤw×' alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '-\x13\x04©b\x18Ó\x84ÆPߤw×', EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) kwargs = {}, initial_draws = 2, start = 5423220.980768362, result = None finish = 5423221.559937544, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=579169) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 579.17ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 68 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 579.17ms, which exceeds the deadline of 200.00ms'), "args = ('', '-\\x13\\x04©b\\x18Ó\\x8...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback)

>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='-\x13\x04©b\x18Ó\x84ÆPߤw×', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_simmetry(
E               alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}),
E               left='',
E               right='-\x13\x04©b\x18Ó\x84ÆPߤw×',
E           )
E           Unreliable test timings! On an initial run, this test took 579.17ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 4.76 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_range[alg21] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 32 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 32 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or it
        might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 32 bytes, frozen) function = .run at 0xffffff98ab0540> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 32 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\x01MÉ\x01', right = '\x01MÉ\x01' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x01MÉ\x01', '\x01MÉ\x01', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 5423238.03935653, result = None finish = 5423238.301047717, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=261691) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 261.69ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 32 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 261.69ms, which exceeds the deadline of 200.00ms'), "args = ('\\x01MÉ\\x01', '\\x01MÉ\\x0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback)

>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\x01MÉ\x01', right='\x01MÉ\x01', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_range(
E               alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}),
E               left='\x01MÉ\x01',
E               right='\x01MÉ\x01',
E           )
E           Unreliable test timings! On an initial run, this test took 261.69ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.65 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_normalized_by_one[alg0] _________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 31 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 31 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or it
        might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 31 bytes, frozen) function = .run at 0xffffff83736ac0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 31 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000b5bd5\U00102634t', right = '+Z' alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000b5bd5\U00102634t', '+Z', ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) kwargs = {}, initial_draws = 2, start = 5423242.65681294, result = None finish = 5423243.042627459, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=385815) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 385.81ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 31 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 385.81ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000b5bd5\\U00102634t', '...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback)

>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='\U000b5bd5\U00102634t', right='+Z', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalized_by_one(
E               alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}),
E               left='\U000b5bd5\U00102634t',
E               right='+Z',
E           )
E           Unreliable test timings! On an initial run, this test took 385.81ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 9.74 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
______________________________ test_qval[2-Jaro] _______________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 164 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 164 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or it
        might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 164 bytes, frozen) function = .run at 0xffffff7683dda0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 164 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '츇¤U;IÂ\U0003902eJ\U00101b68\U000b1e17\U0005ff55ý\x8eX\U00087a40¤ík\x80\x83i\U0008bf97\U0006323b' right = 'mo', alg = 'Jaro', qval = 2 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('츇¤U;IÂ\U0003902eJ\U00101b68\U000b1e17\U0005ff55ý\x8eX\U00087a40¤ík\x80\x83i\U0008bf97\U0006323b', 'mo', 'Jaro', 2) kwargs = {}, initial_draws = 2, start = 5423251.222156353, result = None finish = 5423251.628371557, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=406215) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 406.21ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'Jaro', qval = 2 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 164 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 406.21ms, which exceeds the deadline of 200.00ms'), "args = ('츇¤U;IÂ\\U0003902eJ\\U00101b...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback)

>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_qval(left='츇¤U;IÂ\U0003902eJ\U00101b68\U000b1e17\U0005ff55ý\x8eX\U00087a40¤ík\x80\x83i\U0008bf97\U0006323b', right='mo', alg='Jaro', qval=2) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_qval(
E               alg='Jaro',
E               qval=2,
E               left='츇¤U;IÂ\U0003902eJ\U00101b68\U000b1e17\U0005ff55ý\x8eX\U00087a40¤ík\x80\x83i\U0008bf97\U0006323b',
E               right='mo',
E           )
E           Unreliable test timings! On an initial run, this test took 406.21ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.16 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
__________________________ test_compare_with_tversky ___________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 24 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 24 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or it
        might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 24 bytes, frozen) function = .run at 0xffffff9df23060> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 24 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000a07da6', right = '⺡' @hypothesis.given( > left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_token/test_jaccard.py:29: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000a07da6', '⺡'), kwargs = {}, initial_draws = 2 start = 5423260.598758564, result = None, finish = 5423260.953259897 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=354501) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 354.50ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given( > left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_token/test_jaccard.py:29: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 24 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 354.50ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000a07da6', '⺡'), kwargs...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_compare_with_tversky(left='\U000a07da6', right='⺡') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_compare_with_tversky( E left='\U000a07da6', E right='⺡', E ) E Unreliable test timings! On an initial run, this test took 354.50ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.05 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg22] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000e555d\U0001174cË1\U000c6ff3\x96\x1f\x0c\x9bÄ\x8a\\F\x05æm\x94(\U000d99e2𠳽j\x93l\x91\xad\x1eÎ\U000df440Û\x17u𭟓\x... SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 5423273.543801419, result = None finish = 5423273.846201976, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=302401) current_deadline = datetime.timedelta(microseconds=200000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 302.40ms, which exceeds the deadline of 200.00ms E Falsifying example: test_normalization_range( E alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='\U000e555d\U0001174cË1\U000c6ff3\x96\x1f\x0c\x9bÄ\x8a\\F\x05æm\x94(\U000d99e2𠳽j\x93l\x91\xad\x1eÎ\U000df440Û\x17u𭟓\xa0«ÅïKò1?¤\x87×O*@1[©', E right='ùm\x8e,C\U000cac84\U0001d547\x9fïá\U00079297ò[&', E ) /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded ___________________________ test_is_normalized[alg0] ___________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 37 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well.
tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 37 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 37 bytes, frozen) function = .run at 0xffffff80133ec0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 37 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '6æ\U000427e8¨', right = 'FÏü' alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('6æ\U000427e8¨', 'FÏü', ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) kwargs = {}, initial_draws = 2, start = 5423253.932148267, result = None finish = 5423254.255753518, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=323605) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 323.60ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) 
tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 37 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 323.60ms, which exceeds the deadline of 200.00ms'), "args = ('6æ\\U000427e8¨', 'FÏü', Ari...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. 
If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='6æ\U000427e8¨', right='FÏü', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_is_normalized( E alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}), E left='6æ\U000427e8¨', E right='FÏü', E ) E Unreliable test timings! On an initial run, this test took 323.60ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 181.13 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_normalization_range ___________________________ [gw7] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 47 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 47 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 47 bytes, frozen) function = .run at 0xffffff7fec1260> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 47 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '𐦌ÄÝ\U000fe118𠷵BBà' @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_normalization_range(text): tests/test_compression/test_entropy_ncd.py:62: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('𐦌ÄÝ\U000fe118𠷵BBà',), kwargs = {}, initial_draws = 1 start = 5423275.307478703, result = None, finish = 5423275.621686868 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=314208) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 314.21ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_normalization_range(text): tests/test_compression/test_entropy_ncd.py:62: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 47 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 314.21ms, which exceeds the deadline of 200.00ms'), "args = ('𐦌ÄÝ\\U000fe118𠷵BBà',), kwar...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(text='𐦌ÄÝ\U000fe118𠷵BBà') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E text='𐦌ÄÝ\U000fe118𠷵BBà', E ) E Unreliable test timings! On an initial run, this test took 314.21ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.72 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_normalized_by_one[alg1] _________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 33 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 33 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 33 bytes, frozen) function = .run at 0xffffff982a1f80> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 33 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '\U000997e2\x84sêD\U000cf839' alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '\U000997e2\x84sêD\U000cf839', BWTRLENCD({'terminator': '\x00'})) kwargs = {}, initial_draws = 2, start = 5423275.3071887, result = None finish = 5423275.621690868, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=314502) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 314.50ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 33 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 314.50ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\U000997e2\\x84sêD\\U0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='\U000997e2\x84sêD\U000cf839', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=BWTRLENCD({'terminator': '\x00'}), E left='', E right='\U000997e2\x84sêD\U000cf839', E ) E Unreliable test timings! On an initial run, this test took 314.50ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 4.42 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_compare_with_tversky_as_set _______________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 6 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 6 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) function = .run at 0xffffff862b9ee0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 6 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
                        report(printer.getvalue())
>                       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '', right = 'Ü'

    @hypothesis.given(
>       left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_token/test_sorensen.py:37:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', 'Ü'), kwargs = {}, initial_draws = 2, start = 5423279.318566083
result = None, finish = 5423279.6056317, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=287066)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 287.07ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

    @hypothesis.given(
>       left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_token/test_sorensen.py:37:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 287.07ms, which exceeds the deadline of 200.00ms'), "args = ('', 'Ü'), kwargs = {}, initi...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent an
        ordinary test failure, or a fatal error, or a control exception. If
        this method returns normally, the test might have passed, or it might
        have placed ``data`` in an unsuccessful state and then swallowed the
        corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find
        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                ...  # (deadline-checking wrapper, listed in full above)
                return result

        def run(data):
            ...  # (argument generation and example printing, listed above)
            return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_compare_with_tversky_as_set(left='', right='Ü') produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_compare_with_tversky_as_set(
E               left='',
E               right='Ü',
E           )
E           Unreliable test timings! On an initial run, this test took 287.07ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.18 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
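Note: the remedy the diagnostic itself suggests is a per-test settings override. A minimal sketch of that fix for the failing test above, assuming its body stays unchanged (only the decorator is new; hypothesis.settings(deadline=None) is the standard API):

    import hypothesis
    from hypothesis import strategies

    @hypothesis.settings(deadline=None)  # lift the 200 ms per-example deadline
    @hypothesis.given(
        left=strategies.text(),
        right=strategies.text(),
    )
    def test_compare_with_tversky_as_set(left, right):
        ...  # unchanged test body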
___________________________ test_is_normalized[alg1] ___________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 75 bytes, frozen)

>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '', right = '쥈ho\x03𢻥^\x88\U000bc281\U000857abl'
alg = BWTRLENCD({'terminator': '\x00'})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '쥈ho\x03𢻥^\x88\U000bc281\U000857abl', BWTRLENCD({'terminator': '\x00'}))
kwargs = {}, initial_draws = 2, start = 5423279.315873858, result = None
finish = 5423279.631181033, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=315307)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 315.31ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='쥈ho\x03𢻥^\x88\U000bc281\U000857abl', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_is_normalized(
E           alg=BWTRLENCD({'terminator': '\x00'}),
E           left='',
E           right='쥈ho\x03𢻥^\x88\U000bc281\U000857abl',
E       )
E       Unreliable test timings! On an initial run, this test took 315.31ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.95 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
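Note: rather than decorating every affected test, the whole suite can be relaxed once via Hypothesis's settings-profile API. A sketch of a conftest.py for a slow builder like this chroot (register_profile/load_profile are the real API; the profile name and file placement are illustrative assumptions):

    # conftest.py (sketch)
    from hypothesis import settings

    # Register a relaxed profile and activate it for the whole test run.
    settings.register_profile("rpmbuild", deadline=None)
    settings.load_profile("rpmbuild")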
_______________________ test_normalization_range[alg23] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 37 bytes, frozen)

>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '\x1f', right = '\x04\x0b\U000a18c1\x0b\U00046126'
alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'G', 'Q', 'B', 'L', 'I', 'M', 'X', 'U', 'Y', 'N', 'J', 'D', 'T', 'O', 'F', 'E', 'R', 'S', 'C', 'V', 'A', 'Z', 'K', 'P'})})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\x1f', '\x04\x0b\U000a18c1\x0b\U00046126', Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': Fa... 'Q', 'B', 'L', 'I', 'M', 'X', 'U', 'Y', 'N', 'J', 'D', 'T', 'O', 'F', 'E', 'R', 'S', 'C', 'V', 'A', 'Z', 'K', 'P'})}))
kwargs = {}, initial_draws = 2, start = 5423286.117777789, result = None
finish = 5423286.426734707, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=308957)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 308.96ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\x1f', right='\x04\x0b\U000a18c1\x0b\U00046126', alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'G', 'Q', 'B', 'L', 'I', 'M', 'X', 'U', 'Y', 'N', 'J', 'D', 'T', 'O', 'F', 'E', 'R', 'S', 'C', 'V', 'A', 'Z', 'K', 'P'})})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'G', 'Q', 'B', 'L', 'I', 'M', 'X', 'U', 'Y', 'N', 'J', 'D', 'T', 'O', 'F', 'E', 'R', 'S', 'C', 'V', 'A', 'Z', 'K', 'P'})}),
E           left='\x1f',
E           right='\x04\x0b\U000a18c1\x0b\U00046126',
E       )
E       Unreliable test timings! On an initial run, this test took 308.96ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.72 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
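Note: every traceback shows current_deadline = 250000 microseconds against a configured deadline of 200 ms. That is the grace period visible in the wrapper source above, (current_deadline // 4) * 5, applied on non-final runs. A quick arithmetic check with timedelta (standard library only):

    import datetime

    deadline = datetime.timedelta(milliseconds=200)  # settings.deadline
    current = (deadline // 4) * 5                    # 25% grace on non-final runs
    print(current)  # 0:00:00.250000, matching the traceback locals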
___________________________ test_is_normalized[alg2] ___________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 2 bytes, frozen)

>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '', right = '', alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '', BZ2NCD({})), kwargs = {}, initial_draws = 2
start = 5423289.719434435, result = None, finish = 5423290.035668119
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=316234)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 316.23ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='', alg=BZ2NCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_is_normalized(
E           alg=BZ2NCD({}),
E           left='',
E           right='',
E       )
E       Unreliable test timings! On an initial run, this test took 316.23ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 14.00 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
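Note: if a falsifying input like the one above is worth keeping as a regression case, Hypothesis can replay it on every run via the @example decorator. A hedged sketch, simplified to the strategy-drawn arguments only (in the real test, alg comes from pytest's parametrize, so it is not pinned here):

    import hypothesis
    from hypothesis import strategies

    @hypothesis.given(
        left=strategies.text(),
        right=strategies.text(),
    )
    @hypothesis.example(left='', right='')  # falsifying input from the BZ2NCD failure
    def test_is_normalized(left, right):
        ...  # unchanged test body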
_________________________ test_normalized_by_one[alg2] _________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 7 bytes, frozen)

>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '', right = '\U000c8edd', alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\U000c8edd', BZ2NCD({})), kwargs = {}, initial_draws = 2
start = 5423298.746354657, result = None, finish = 5423299.112302295
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=365948)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 365.95ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='\U000c8edd', alg=BZ2NCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalized_by_one(
E           alg=BZ2NCD({}),
E           left='',
E           right='\U000c8edd',
E       )
E       Unreliable test timings! On an initial run, this test took 365.95ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 10.06 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
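Note: in CI or a build chroot like this one, the relaxed profile is usually selected from the environment rather than unconditionally. This mirrors the recipe in the Hypothesis documentation; the profile name and environment-variable convention below are illustrative assumptions:

    # conftest.py (sketch)
    import os
    from hypothesis import settings

    settings.register_profile("build", deadline=None)
    settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default"))
    # e.g. run with: HYPOTHESIS_PROFILE=build pytest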
_________________________ test_normalized_by_one[alg3] _________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

self =
data = ConjectureData(INTERESTING, 97 bytes, frozen)

>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '𞠤Ø\x05\x816§ò\U000d83d5\U0009b685÷\U000abef4¯â¬'
right = '\U00059a6d\x9c', alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('𞠤Ø\x05\x816§ò\U000d83d5\U0009b685÷\U000abef4¯â¬', '\U00059a6d\x9c', RLENCD({'qval': 1}))
kwargs = {}, initial_draws = 2, start = 5423323.72895159, result = None
finish = 5423324.225316216, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=496365)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 496.37ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='𞠤Ø\x05\x816§ò\U000d83d5\U0009b685÷\U000abef4¯â¬', right='\U00059a6d\x9c', alg=RLENCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalized_by_one(
E           alg=RLENCD({'qval': 1}),
E           left='𞠤Ø\x05\x816§ò\U000d83d5\U0009b685÷\U000abef4¯â¬',
E           right='\U00059a6d\x9c',
E       )
E       Unreliable test timings! On an initial run, this test took 496.37ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.23 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='𞠤Ø\x05\x816§ò\U000d83d5\U0009b685÷\U000abef4¯â¬', right='\U00059a6d\x9c', alg=RLENCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=RLENCD({'qval': 1}), E left='𞠤Ø\x05\x816§ò\U000d83d5\U0009b685÷\U000abef4¯â¬', E right='\U00059a6d\x9c', E ) E Unreliable test timings! On an initial run, this test took 496.37ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.23 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_normalized_by_one[alg6] _________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 43 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 43 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
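These Flaky failures all follow one pattern: the first execution of an example overran Hypothesis's 200 ms default deadline on the loaded builder, and the final replay did not. As a minimal sketch of the fix the error message itself suggests (the ALGS list, the constructor arguments mirroring the reprs in the log, and the test body are assumptions, not the actual tests/test_compression/test_common.py source), the per-example deadline can be lifted for the affected property test:

    # Hypothetical rewrite of the failing property test with the
    # per-example deadline disabled, as the Hypothesis report suggests.
    import hypothesis
    import pytest
    import textdistance

    # Illustrative stand-in for the ALGS list used by the real test module.
    ALGS = [
        textdistance.RLENCD(qval=1),
        textdistance.EntropyNCD(qval=1, coef=1, base=2),
    ]

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(deadline=None)  # no 200 ms per-example time limit
    @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
    def test_normalized_by_one(left, right, alg):
        # Guessed invariant: normalized distance and similarity sum to one.
        total = alg.normalized_distance(left, right) + alg.normalized_similarity(left, right)
        assert total == pytest.approx(1)

A per-test @hypothesis.settings(...) override only helps if it can be patched into the test suite; the profile-based approach sketched at the end of this log avoids patching the tests entirely.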
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 43 bytes, frozen) function = .run at 0xffffff831eaa20> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 43 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = 'hº\x7f`\U00097fe8\x19\U000f7a17' alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', 'hº\x7f`\U00097fe8\x19\U000f7a17', EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) kwargs = {}, initial_draws = 2, start = 5423355.17091223, result = None finish = 5423355.558853367, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=387941) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 387.94ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 43 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 387.94ms, which exceeds the deadline of 200.00ms'), "args = ('', 'hº\\x7f`\\U00097fe8\\x1...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='hº\x7f`\U00097fe8\x19\U000f7a17', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}), E left='', E right='hº\x7f`\U00097fe8\x19\U000f7a17', E ) E Unreliable test timings! On an initial run, this test took 387.94ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.09 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky =========================== short test summary info ============================ FAILED tests/test_common.py::test_normalization_same[alg0] - hypothesis.error... FAILED tests/test_common.py::test_normalization_by_one[alg0] - hypothesis.err... FAILED tests/test_common.py::test_normalization_range[alg0] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_by_one[alg12] - hypothesis.er... FAILED tests/test_common.py::test_normalization_by_one[alg1] - hypothesis.err... FAILED tests/test_common.py::test_normalization_same[alg1] - hypothesis.error... FAILED tests/test_common.py::test_normalization_by_one[alg2] - hypothesis.err... FAILED tests/test_common.py::test_normalization_range[alg1] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_range[alg14] - hypothesis.err... FAILED tests/test_common.py::test_normalization_by_one[alg14] - hypothesis.er... FAILED tests/test_common.py::test_normalization_same[alg4] - hypothesis.error... FAILED tests/test_common.py::test_normalization_range[alg15] - hypothesis.err... FAILED tests/test_common.py::test_normalization_by_one[alg4] - hypothesis.err... FAILED tests/test_common.py::test_normalization_same[alg17] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_by_one[alg16] - hypothesis.er... FAILED tests/test_common.py::test_normalization_by_one[alg5] - hypothesis.err... FAILED tests/test_common.py::test_normalization_same[alg7] - hypothesis.error... FAILED tests/test_common.py::test_normalization_same[alg18] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_by_one[alg6] - hypothesis.err... FAILED tests/test_common.py::test_normalization_by_one[alg17] - hypothesis.er... FAILED tests/test_common.py::test_normalization_same[alg8] - hypothesis.error... FAILED tests/test_common.py::test_normalization_range[alg7] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_range[alg8] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_by_one[alg7] - hypothesis.err... FAILED tests/test_common.py::test_normalization_same[alg10] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_range[alg9] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_same[alg21] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_same[alg11] - hypothesis.erro... FAILED tests/test_common.py::test_normalization_by_one[alg21] - hypothesis.er... FAILED tests/test_common.py::test_normalization_range[alg11] - hypothesis.err... FAILED tests/test_common.py::test_normalization_by_one[alg22] - hypothesis.er... 
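The locals in both tracebacks make the timing mechanics concrete: the configured deadline is 200 ms, yet `current_deadline` shows up as `timedelta(microseconds=250000)` because non-final runs are checked against the padded value computed by `(current_deadline // 4) * 5` in the wrapper above. A short reconstruction of that arithmetic, using the figures from the EntropyNCD failure:

    # Reconstruction of the deadline math from the frames above.
    from datetime import timedelta

    deadline = timedelta(milliseconds=200)   # Hypothesis's default deadline
    padded = (deadline // 4) * 5             # 25% grace on non-final runs
    assert padded == timedelta(microseconds=250000)

    # The initial EntropyNCD run took ~387.94 ms, beyond even the padded
    # limit, so DeadlineExceeded was raised; the final replay finished in
    # ~1.09 ms, well under the strict 200 ms limit, which is why Hypothesis
    # reports the test as Flaky rather than as a plain failure.
    runtime = timedelta(microseconds=387941)
    assert runtime >= padded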
=========================== short test summary info ============================
FAILED tests/test_common.py::test_normalization_same[alg0] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg0] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg0] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg12] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg1] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg1] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg2] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg1] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg14] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg14] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg4] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_range[alg15] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg4] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg17] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg16] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg5] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg7] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_same[alg18] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg6] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg17] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg8] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_range[alg7] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg8] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg7] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg10] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg9] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg21] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg11] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg21] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg11] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg22] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg16] - hypothesis.err...
FAILED tests/test_compression/test_common.py::test_simmetry[alg3] - hypothesi...
FAILED tests/test_common.py::test_normalization_by_one[alg23] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg11] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg17] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg18] - hypothesis.err...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg4] - hypo...
FAILED tests/test_external.py::test_qval[3-Jaro] - hypothesis.errors.Flaky: H...
FAILED tests/test_external.py::test_qval[3-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_external.py::test_qval[None-JaroWinkler] - hypothesis.error...
FAILED tests/test_external.py::test_list_of_numbers[Jaro] - hypothesis.errors...
FAILED tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor
FAILED tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor
FAILED tests/test_compression/test_common.py::test_is_normalized[alg5] - hypo...
FAILED tests/test_compression/test_common.py::test_simmetry[alg0] - hypothesi...
FAILED tests/test_compression/test_common.py::test_simmetry[alg5] - hypothesi...
FAILED tests/test_compression/test_common.py::test_simmetry[alg1] - hypothesi...
FAILED tests/test_external.py::test_list_of_numbers[JaroWinkler] - hypothesis...
FAILED tests/test_compression/test_common.py::test_simmetry[alg2] - hypothesi...
FAILED tests/test_compression/test_common.py::test_simmetry[alg6] - hypothesi...
FAILED tests/test_common.py::test_normalization_range[alg21] - hypothesis.err...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg0] - ...
FAILED tests/test_external.py::test_qval[2-Jaro] - hypothesis.errors.Flaky: H...
FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky - hypothes...
FAILED tests/test_common.py::test_normalization_range[alg22] - hypothesis.err...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg0] - hypo...
FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg1] - ...
FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set - ...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg1] - hypo...
FAILED tests/test_common.py::test_normalization_range[alg23] - hypothesis.err...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg2] - hypo...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg2] - ...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg3] - ...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6] - ...
================== 66 failed, 346 passed in 670.83s (0:11:10) ==================

RPM build errors:
error: Bad exit status from /var/tmp/rpm-tmp.z7jYYa (%check)
    Bad exit status from /var/tmp/rpm-tmp.z7jYYa (%check)
Child return code was: 1
EXCEPTION: [Error('Command failed: \n # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec\n', 1)]
Traceback (most recent call last):
  File "/usr/lib/python3.11/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
             ^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mockbuild/util.py", line 597, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed:
 # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec
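All 66 failures above are Hypothesis DeadlineExceeded/Flaky timing artifacts rather than defects in textdistance itself, which is a common failure mode for property-based suites on loaded build hosts. One way a packager might stabilize %check is a Hypothesis settings profile; this is a sketch under stated assumptions (the profile names, the conftest.py placement, and the HYPOTHESIS_PROFILE variable wiring are choices made here, while settings.register_profile/load_profile is the documented Hypothesis API):

    # conftest.py (hypothetical) -- register a deadline-free settings
    # profile so property tests cannot fail on slow or busy build hosts.
    import os

    from hypothesis import settings

    settings.register_profile("ci", deadline=None)  # no per-example deadline
    settings.register_profile("dev", deadline=500)  # looser local limit, in ms

    # Pick the profile from the environment; falls back to Hypothesis's
    # built-in "default" profile when the variable is unset.
    settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default"))

With such a conftest.py in place, exporting HYPOTHESIS_PROFILE=ci in %check (or passing the Hypothesis pytest plugin's --hypothesis-profile=ci option) would remove the deadline and, with it, this entire class of Flaky failures.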