Mock Version: 5.0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2527130-61818/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1710115200
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.src.rpm
Child return code was: 0
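For reference, the SRPM pass above can be reproduced outside the chroot with plain rpmbuild. A minimal sketch, assuming the spec and tarball have been copied into a local rpmbuild tree (the ~/rpmbuild paths are illustrative, not taken from this log):

# Illustrative only: rebuild the source RPM outside mock.
# Assumes ~/rpmbuild/SPECS/python-textdistance.spec and
# ~/rpmbuild/SOURCES/textdistance-4.6.1.tar.gz are already in place.
rpmbuild -bs --nodeps --target noarch \
    --define "_topdir $HOME/rpmbuild" \
    "$HOME/rpmbuild/SPECS/python-textdistance.spec"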
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2527130-61818/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1710115200
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.rERS1h
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
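The sed call at the end of %prep comments out every setup.py line that mentions isort, dropping the lint-only dependency before the build backend reads the file. A quick demonstration of the expression on a made-up sample line:

# Demo of the %prep sed: "# " is inserted after the leading blanks of any
# line containing the word "isort", turning it into a Python comment.
echo "    'isort'," | sed -r 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/'
# prints:     # 'isort',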
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.hxDh5Y
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement not satisfied: setuptools >= 40.8
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement not satisfied: setuptools >= 40.8
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
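"Child return code was: 11" is rpmbuild's signal that %generate_buildrequires emitted BuildRequires that are not yet installed; mock reacts by installing them and re-running the -br pass until the generator asks for nothing new. Roughly what mock automates here, sketched as a shell loop (the dnf builddep step and the paths are illustrative, not mock's actual implementation):

# Illustrative approximation of the dynamic BuildRequires retry loop.
# Exit code 11 means: new BuildRequires were written; install and retry.
while true; do
    rpmbuild -br --noclean --target noarch --nodeps \
        /builddir/build/SPECS/python-textdistance.spec
    rc=$?
    [ "$rc" -ne 11 ] && break
    dnf builddep -y /builddir/build/SRPMS/python-textdistance-*.buildreqs.nosrc.rpm
done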
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2527130-61818/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1710115200
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.Jei421
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
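With setuptools and wheel now present, the generator can import the PEP 517 backend and call its hooks; that is what produces the "running egg_info" / "running dist_info" output in the pass below. get_requires_for_build_wheel() reports extra build requirements (hence "Handling wheel from get_requires_for_build_wheel"), and prepare_metadata_for_build_wheel() writes the .dist-info directory whose Requires-Dist entries are then filtered per extra. A minimal sketch of the same two hook calls, assuming it is run from an unpacked textdistance-4.6.1 source tree:

# Sketch of the two PEP 517 hook calls behind the egg_info/dist_info output.
python3 - <<'EOF'
import setuptools.build_meta as backend  # the default build backend
print(backend.get_requires_for_build_wheel())         # ['wheel'] on setuptools of this vintage
print(backend.prepare_metadata_for_build_wheel('.'))  # writes ./textdistance-4.6.1.dist-info
EOF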
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.gxzyXr
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
creating textdistance.egg-info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
writing manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
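Every "Handling <req> ; extra == '<name>'" pair above is the generator matching a marker-qualified Requires-Dist against the extras requested with -x: requested extras yield "Requirement satisfied/not satisfied", while extras outside the requested set are logged as alien and skipped. The same marker logic can be checked directly with the packaging library the generator itself requires; a sketch (the generator's internals may differ):

# Sketch: how an "extra == ..." environment marker gates a requirement.
python3 - <<'EOF'
from packaging.requirements import Requirement

req = Requirement("rapidfuzz >=2.6.0 ; extra == 'jaro'")
for requested in ('jaro', 'test'):
    # True  -> the requirement applies to this extra and must be resolved
    # False -> alien to this pass and is ignored
    print(requested, req.marker.evaluate({'extra': requested}))
EOF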
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2527130-61818/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1710115200
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.AY7uUX
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.2wV2B0
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
creating textdistance.egg-info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
writing manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7)
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0)
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7)
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0)
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7)
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0)
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0)
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0)
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3
-Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) 
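
Note on the hook invocation just above: each Requires-Dist entry in the generated metadata carries an environment marker such as extra == 'jaro', and pyproject_buildrequires.py turns an entry into a BuildRequires only when that marker matches one of the extras requested with -x; everything else is logged as an "alien requirement" and skipped. A minimal sketch of that classification, assuming only the packaging library (which the hook itself echoes as a build requirement above) -- this mirrors the logged behaviour, it is not the hook's actual code:

    from packaging.requirements import Requirement

    # Extras requested via -x, hand-copied from the command line above and
    # lower-cased here for illustration (the hook normalizes extra names).
    requested = {"extras", "common", "extra", "test",
                 "dameraulevenshtein", "jaro", "jarowinkler", "levenshtein"}

    for line in ("rapidfuzz >=2.6.0 ; extra == 'jaro'",
                 "distance ; extra == 'hamming'"):
        req = Requirement(line)
        # An entry with no marker always applies; a marker like
        # extra == 'jaro' is true only for a requested extra.
        wanted = req.marker is None or any(
            req.marker.evaluate({"extra": e}) for e in requested)
        if wanted:
            print(f"Generating BuildRequires for: {req}")
        else:
            print(f"Ignoring alien requirement: {line}")
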
running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'dameraulevenshtein' (installed: jellyfish 0.9.0) Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' (installed: pyxDamerauLevenshtein 1.7.1) Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist 
(textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; 
extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'common' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'common' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'common' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'common' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'common' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extra' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extra' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extra' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extra' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extra' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extras' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extras' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extras' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extras' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extras' (installed: rapidfuzz 2.13.7) Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien 
requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0)
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0)
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
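
The -br pass stops here on purpose: exit code 11 from rpmbuild signals that %generate_buildrequires produced dynamic build requirements, mock installs anything missing from the freshly written buildreqs.nosrc.rpm, and the full rpmbuild -ba run below repeats the hook output before continuing to %build. Each "Requirement satisfied ... (installed: ...)" line is a version check against the build root; roughly equivalent, as a sketch using the standard library plus packaging (the real hook additionally maps names onto Fedora's python3dist() provides):

    from importlib.metadata import version, PackageNotFoundError
    from packaging.requirements import Requirement

    def check(line):
        req = Requirement(line)
        try:
            installed = version(req.name)   # e.g. "2.13.7" for rapidfuzz
        except PackageNotFoundError:
            return f"Requirement not satisfied: {line}"
        ok = req.specifier.contains(installed, prereleases=True)
        state = "satisfied" if ok else "not satisfied"
        return f"Requirement {state}: {line} (installed: {req.name} {installed})"

    print(check("rapidfuzz >=2.6.0"))
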
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2527130-61818/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1710115200 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.r5d1ib + umask 022 + cd /builddir/build/BUILD + cd textdistance-4.6.1 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security 
-Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein' Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 
'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; 
extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'common' Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'common' Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'common' Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common' Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common' Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extra' Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extra' Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extra' Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra' Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra' Handling jellyfish ; extra == 'extras' from hook 
generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extras' Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extras' Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extras' Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras' Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras' Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2) + cat 
/builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x 
extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'dameraulevenshtein' (installed: jellyfish 0.9.0) Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' (installed: pyxDamerauLevenshtein 1.7.1) Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: 
Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; 
extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'common' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'common' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'common' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'common' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'common' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extra' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extra' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extra' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extra' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extra' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extras' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extras' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra 
== 'extras' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extras' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extras' (installed: rapidfuzz 2.13.7) Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.3.2) + cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + 
RPM_EC=0 ++ jobs -p + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.R6VxY9 + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_wheel.py /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir Processing /builddir/build/BUILD/textdistance-4.6.1 Preparing metadata (pyproject.toml): started Running command Preparing metadata (pyproject.toml) running dist_info creating /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-7x8vu9no/textdistance.egg-info writing /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-7x8vu9no/textdistance.egg-info/PKG-INFO writing dependency_links to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-7x8vu9no/textdistance.egg-info/dependency_links.txt writing requirements to 
/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-7x8vu9no/textdistance.egg-info/requires.txt writing top-level names to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-7x8vu9no/textdistance.egg-info/top_level.txt writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-7x8vu9no/textdistance.egg-info/SOURCES.txt' reading manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-7x8vu9no/textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-7x8vu9no/textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-7x8vu9no/textdistance-4.6.1.dist-info' Preparing metadata (pyproject.toml): finished with status 'done' Building wheels for collected packages: textdistance Building wheel for textdistance (pyproject.toml): started Running command Building wheel for textdistance (pyproject.toml) running bdist_wheel running build running build_py creating build creating build/lib creating build/lib/textdistance copying textdistance/benchmark.py -> build/lib/textdistance copying textdistance/__init__.py -> build/lib/textdistance copying textdistance/utils.py -> build/lib/textdistance copying textdistance/libraries.py -> build/lib/textdistance creating build/lib/textdistance/algorithms copying textdistance/algorithms/token_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/vector_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/compression_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/types.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/__init__.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/sequence_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/edit_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/phonetic.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/simple.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/base.py -> build/lib/textdistance/algorithms copying textdistance/libraries.json -> build/lib/textdistance installing to build/bdist.linux-riscv64/wheel running install running install_lib creating build/bdist.linux-riscv64 creating build/bdist.linux-riscv64/wheel creating build/bdist.linux-riscv64/wheel/textdistance creating build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/token_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/vector_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/compression_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/types.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/__init__.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/sequence_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/edit_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying 
build/lib/textdistance/algorithms/phonetic.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/simple.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/base.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/benchmark.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/libraries.json -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/__init__.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/utils.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/libraries.py -> build/bdist.linux-riscv64/wheel/textdistance running install_egg_info running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Copying textdistance.egg-info to build/bdist.linux-riscv64/wheel/textdistance-4.6.1-py3.12.egg-info running install_scripts creating build/bdist.linux-riscv64/wheel/textdistance-4.6.1.dist-info/WHEEL creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-wheel-sz2i8sce/.tmp-vz9ln3iq/textdistance-4.6.1-py3-none-any.whl' and adding 'build/bdist.linux-riscv64/wheel' to it adding 'textdistance/__init__.py' adding 'textdistance/benchmark.py' adding 'textdistance/libraries.json' adding 'textdistance/libraries.py' adding 'textdistance/utils.py' adding 'textdistance/algorithms/__init__.py' adding 'textdistance/algorithms/base.py' adding 'textdistance/algorithms/compression_based.py' adding 'textdistance/algorithms/edit_based.py' adding 'textdistance/algorithms/phonetic.py' adding 'textdistance/algorithms/sequence_based.py' adding 'textdistance/algorithms/simple.py' adding 'textdistance/algorithms/token_based.py' adding 'textdistance/algorithms/types.py' adding 'textdistance/algorithms/vector_based.py' adding 'textdistance-4.6.1.dist-info/LICENSE' adding 'textdistance-4.6.1.dist-info/METADATA' adding 'textdistance-4.6.1.dist-info/WHEEL' adding 'textdistance-4.6.1.dist-info/top_level.txt' adding 'textdistance-4.6.1.dist-info/RECORD' removing build/bdist.linux-riscv64/wheel Building wheel for textdistance (pyproject.toml): finished with status 'done' Created wheel for textdistance: filename=textdistance-4.6.1-py3-none-any.whl size=31018 sha256=8996eee7df66e19f58259429faf0d314f8d2d10a0996dc94c211e57d9fa2c324 Stored in directory: /builddir/.cache/pip/wheels/af/08/72/d6baf94a0831066222f63a4e4a469ea938a661b7e8974b7b68 Successfully built textdistance + RPM_EC=0 ++ jobs -p + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.AJsmVG + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch '!=' / ']' + rm -rf /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch ++ dirname /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch + mkdir -p /builddir/build/BUILDROOT + mkdir /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int 
-Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 ++ ls /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl ++ xargs basename --multiple ++ sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/' + specifier=textdistance==4.6.1 + '[' -z textdistance==4.6.1 ']' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + /usr/bin/python3 -m pip install --root /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --prefix /usr --no-deps --disable-pip-version-check --progress-bar off --verbose --ignore-installed --no-warn-script-location --no-index --no-cache-dir --find-links /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir textdistance==4.6.1 Using pip 23.3.2 from /usr/lib/python3.12/site-packages/pip (python 3.12) Looking in links: /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir Processing ./pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl Installing collected packages: textdistance Successfully installed textdistance-4.6.1 + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin ']' + rm -f /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo + site_dirs=() + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + site_dirs+=("/usr/lib/python3.12/site-packages") + '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages '!=' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages ']' + for site_dir in ${site_dirs[@]} + for distinfo in 
/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch$site_dir/*.dist-info + echo '%ghost /usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info' + sed -i s/pip/rpm/ /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/INSTALLER + PYTHONPATH=/usr/lib/rpm/redhat + /usr/bin/python3 -B /usr/lib/rpm/redhat/pyproject_preprocess_record.py --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --record /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record + rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD' + rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED' ++ wc -l /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo ++ cut -f1 '-d ' + lines=1 + '[' 1 -ne 1 ']' + RPM_PERCENTAGES_COUNT=2 + /usr/bin/python3 /usr/lib/rpm/redhat/pyproject_save_files.py --output-files /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-files --output-modules /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-modules --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --sitelib /usr/lib/python3.12/site-packages --sitearch /usr/lib64/python3.12/site-packages --python-version 3.12 --pyproject-record /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record --prefix /usr -l textdistance + /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 4.6.1-1.fc40 --unique-debug-suffix -4.6.1-1.fc40.noarch --unique-debug-src-base python-textdistance-4.6.1-1.fc40.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/textdistance-4.6.1 find-debuginfo: starting Extracting debug info from 0 files Creating .debug symlinks for symlinks to ELF files find: ‘debug’: No such file or directory find-debuginfo: done + /usr/lib/rpm/check-buildroot + /usr/lib/rpm/redhat/brp-ldconfig + /usr/lib/rpm/brp-compress + /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip + /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip + /usr/lib/rpm/check-rpaths + /usr/lib/rpm/redhat/brp-mangle-shebangs + /usr/lib/rpm/brp-remove-la-files + env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8 Bytecompiling .py files below /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12 using python3.12 + /usr/lib/rpm/redhat/brp-python-hardlink Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.RfbjmR + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 
-fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 + k='not test_compare[Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein]' + k='not test_compare[Hamming] and not 
test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not 
test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]' + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + PATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin + PYTHONPATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages:/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages + PYTHONDONTWRITEBYTECODE=1 + PYTEST_ADDOPTS=' --ignore=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir' + PYTEST_XDIST_AUTO_NUM_WORKERS=8 + /usr/bin/pytest -v -k 'not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]' -n auto ============================= test session starts ============================== platform linux -- Python 3.12.0, pytest-7.3.2, pluggy-1.3.0 -- /usr/bin/python3 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/builddir/build/BUILD/textdistance-4.6.1/.hypothesis/examples') rootdir: /builddir/build/BUILD/textdistance-4.6.1 configfile: setup.cfg plugins: xdist-3.5.0, hypothesis-6.82.0 created: 8/8 workers 8 workers [412 items] scheduling tests via LoadScheduling tests/test_common.py::test_normalization_range[alg0] tests/test_common.py::test_normalization_by_one[alg12] tests/test_common.py::test_normalization_monotonic[alg0] tests/test_common.py::test_normalization_range[alg12] tests/test_common.py::test_normalization_same[alg0] tests/test_common.py::test_normalization_by_one[alg0] tests/test_common.py::test_normalization_same[alg12] tests/test_common.py::test_normalization_monotonic[alg12] [gw5] [ 0%] PASSED tests/test_common.py::test_normalization_same[alg12] tests/test_common.py::test_normalization_same[alg13] [gw3] [ 0%] PASSED tests/test_common.py::test_normalization_by_one[alg12] tests/test_common.py::test_normalization_by_one[alg13] [gw4] [ 0%] PASSED tests/test_common.py::test_normalization_same[alg0] tests/test_common.py::test_normalization_same[alg1] [gw2] [ 0%] PASSED 
tests/test_common.py::test_normalization_by_one[alg0] tests/test_common.py::test_normalization_by_one[alg1] [gw7] [ 1%] PASSED tests/test_common.py::test_normalization_monotonic[alg12] tests/test_common.py::test_normalization_monotonic[alg13] [gw1] [ 1%] PASSED tests/test_common.py::test_normalization_range[alg12] tests/test_common.py::test_normalization_range[alg13] [gw6] [ 1%] PASSED tests/test_common.py::test_normalization_monotonic[alg0] tests/test_common.py::test_normalization_monotonic[alg1] [gw5] [ 1%] PASSED tests/test_common.py::test_normalization_same[alg13] tests/test_common.py::test_normalization_same[alg14] [gw0] [ 2%] FAILED tests/test_common.py::test_normalization_range[alg0] tests/test_common.py::test_normalization_range[alg1] [gw4] [ 2%] PASSED tests/test_common.py::test_normalization_same[alg1] tests/test_common.py::test_normalization_same[alg2] [gw3] [ 2%] PASSED tests/test_common.py::test_normalization_by_one[alg13] tests/test_common.py::test_normalization_by_one[alg14] [gw1] [ 2%] PASSED tests/test_common.py::test_normalization_range[alg13] [gw7] [ 3%] PASSED tests/test_common.py::test_normalization_monotonic[alg13] tests/test_common.py::test_normalization_range[alg14] tests/test_common.py::test_normalization_monotonic[alg14] [gw4] [ 3%] PASSED tests/test_common.py::test_normalization_same[alg2] [gw5] [ 3%] PASSED tests/test_common.py::test_normalization_same[alg14] tests/test_common.py::test_normalization_same[alg3] tests/test_common.py::test_normalization_same[alg15] [gw0] [ 3%] FAILED tests/test_common.py::test_normalization_range[alg1] tests/test_common.py::test_normalization_range[alg2] [gw2] [ 4%] FAILED tests/test_common.py::test_normalization_by_one[alg1] [gw6] [ 4%] PASSED tests/test_common.py::test_normalization_monotonic[alg1] tests/test_common.py::test_normalization_by_one[alg2] tests/test_common.py::test_normalization_monotonic[alg2] [gw3] [ 4%] PASSED tests/test_common.py::test_normalization_by_one[alg14] tests/test_common.py::test_normalization_by_one[alg15] [gw1] [ 4%] PASSED tests/test_common.py::test_normalization_range[alg14] tests/test_common.py::test_normalization_range[alg15] [gw7] [ 5%] PASSED tests/test_common.py::test_normalization_monotonic[alg14] tests/test_common.py::test_normalization_monotonic[alg15] [gw4] [ 5%] PASSED tests/test_common.py::test_normalization_same[alg3] tests/test_common.py::test_normalization_same[alg4] [gw2] [ 5%] PASSED tests/test_common.py::test_normalization_by_one[alg2] [gw5] [ 5%] PASSED tests/test_common.py::test_normalization_same[alg15] tests/test_common.py::test_normalization_by_one[alg3] [gw0] [ 6%] FAILED tests/test_common.py::test_normalization_range[alg2] tests/test_common.py::test_normalization_range[alg3] tests/test_common.py::test_normalization_same[alg16] [gw6] [ 6%] PASSED tests/test_common.py::test_normalization_monotonic[alg2] tests/test_common.py::test_normalization_monotonic[alg3] [gw4] [ 6%] PASSED tests/test_common.py::test_normalization_same[alg4] tests/test_common.py::test_normalization_same[alg5] [gw5] [ 6%] PASSED tests/test_common.py::test_normalization_same[alg16] tests/test_common.py::test_normalization_same[alg17] [gw7] [ 7%] PASSED tests/test_common.py::test_normalization_monotonic[alg15] tests/test_common.py::test_normalization_monotonic[alg16] [gw1] [ 7%] PASSED tests/test_common.py::test_normalization_range[alg15] tests/test_common.py::test_normalization_range[alg16] [gw0] [ 7%] PASSED tests/test_common.py::test_normalization_range[alg3] 
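
The %check trace above grows the -k filter one deselection at a time through repeated k='... and not <id>' assignments; the final expression handed to pytest is equivalent to joining a flat list of IDs. A sketch of that equivalence (the list is truncated to a few of the IDs shown above; the variable names are mine):

    # Sketch: derive the pytest -k deselection expression from a list of
    # test IDs. The IDs below are a subset of those in the trace above.
    deselected = [
        "test_compare[Hamming]",
        "test_compare[Levenshtein]",
        "test_list_of_numbers[Hamming]",
        # ... the remaining Levenshtein/DamerauLevenshtein qval variants
        "test_qval[None-DamerauLevenshtein]",
    ]
    k_expr = " and ".join(f"not {tid}" for tid in deselected)
    # equivalent invocation: pytest -v -k "<k_expr>" -n auto
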
tests/test_common.py::test_normalization_range[alg4] [gw2] [ 7%] PASSED tests/test_common.py::test_normalization_by_one[alg3] tests/test_common.py::test_normalization_by_one[alg4] [gw3] [ 8%] FAILED tests/test_common.py::test_normalization_by_one[alg15] tests/test_common.py::test_normalization_by_one[alg16] [gw4] [ 8%] PASSED tests/test_common.py::test_normalization_same[alg5] tests/test_common.py::test_normalization_same[alg6] [gw5] [ 8%] PASSED tests/test_common.py::test_normalization_same[alg17] tests/test_common.py::test_normalization_same[alg18] [gw6] [ 8%] PASSED tests/test_common.py::test_normalization_monotonic[alg3] tests/test_common.py::test_normalization_monotonic[alg4] [gw0] [ 8%] PASSED tests/test_common.py::test_normalization_range[alg4] tests/test_common.py::test_normalization_range[alg5] [gw2] [ 9%] PASSED tests/test_common.py::test_normalization_by_one[alg4] tests/test_common.py::test_normalization_by_one[alg5] [gw4] [ 9%] PASSED tests/test_common.py::test_normalization_same[alg6] tests/test_common.py::test_normalization_same[alg7] [gw3] [ 9%] FAILED tests/test_common.py::test_normalization_by_one[alg16] tests/test_common.py::test_normalization_by_one[alg17] [gw1] [ 9%] FAILED tests/test_common.py::test_normalization_range[alg16] tests/test_common.py::test_normalization_range[alg17] [gw5] [ 10%] PASSED tests/test_common.py::test_normalization_same[alg18] tests/test_common.py::test_normalization_same[alg19] [gw0] [ 10%] PASSED tests/test_common.py::test_normalization_range[alg5] tests/test_common.py::test_normalization_range[alg6] [gw6] [ 10%] PASSED tests/test_common.py::test_normalization_monotonic[alg4] tests/test_common.py::test_normalization_monotonic[alg5] [gw4] [ 10%] PASSED tests/test_common.py::test_normalization_same[alg7] tests/test_common.py::test_normalization_same[alg8] [gw7] [ 11%] PASSED tests/test_common.py::test_normalization_monotonic[alg16] tests/test_common.py::test_normalization_monotonic[alg17] [gw3] [ 11%] PASSED tests/test_common.py::test_normalization_by_one[alg17] [gw2] [ 11%] FAILED tests/test_common.py::test_normalization_by_one[alg5] tests/test_common.py::test_normalization_by_one[alg6] tests/test_common.py::test_normalization_by_one[alg18] [gw5] [ 11%] PASSED tests/test_common.py::test_normalization_same[alg19] tests/test_common.py::test_normalization_same[alg20] [gw1] [ 12%] PASSED tests/test_common.py::test_normalization_range[alg17] tests/test_common.py::test_normalization_range[alg18] [gw2] [ 12%] PASSED tests/test_common.py::test_normalization_by_one[alg6] tests/test_common.py::test_normalization_by_one[alg7] [gw6] [ 12%] PASSED tests/test_common.py::test_normalization_monotonic[alg5] tests/test_common.py::test_normalization_monotonic[alg6] [gw3] [ 12%] PASSED tests/test_common.py::test_normalization_by_one[alg18] tests/test_common.py::test_normalization_by_one[alg19] [gw5] [ 13%] PASSED tests/test_common.py::test_normalization_same[alg20] tests/test_common.py::test_normalization_same[alg21] [gw7] [ 13%] PASSED tests/test_common.py::test_normalization_monotonic[alg17] tests/test_common.py::test_normalization_monotonic[alg18] [gw1] [ 13%] PASSED tests/test_common.py::test_normalization_range[alg18] tests/test_common.py::test_normalization_range[alg19] [gw0] [ 13%] FAILED tests/test_common.py::test_normalization_range[alg6] tests/test_common.py::test_normalization_range[alg7] [gw4] [ 14%] FAILED tests/test_common.py::test_normalization_same[alg8] tests/test_common.py::test_normalization_same[alg9] [gw2] [ 14%] PASSED 
tests/test_common.py::test_normalization_by_one[alg7] tests/test_common.py::test_normalization_by_one[alg8] [gw3] [ 14%] PASSED tests/test_common.py::test_normalization_by_one[alg19] tests/test_common.py::test_normalization_by_one[alg20] [gw6] [ 14%] PASSED tests/test_common.py::test_normalization_monotonic[alg6] tests/test_common.py::test_normalization_monotonic[alg7] [gw0] [ 15%] PASSED tests/test_common.py::test_normalization_range[alg7] [gw1] [ 15%] PASSED tests/test_common.py::test_normalization_range[alg19] tests/test_common.py::test_normalization_range[alg20] tests/test_common.py::test_normalization_range[alg8] [gw7] [ 15%] PASSED tests/test_common.py::test_normalization_monotonic[alg18] [gw4] [ 15%] FAILED tests/test_common.py::test_normalization_same[alg9] tests/test_common.py::test_normalization_monotonic[alg19] tests/test_common.py::test_normalization_same[alg10] [gw2] [ 16%] PASSED tests/test_common.py::test_normalization_by_one[alg8] tests/test_common.py::test_normalization_by_one[alg9] [gw3] [ 16%] PASSED tests/test_common.py::test_normalization_by_one[alg20] tests/test_common.py::test_normalization_by_one[alg21] [gw0] [ 16%] PASSED tests/test_common.py::test_normalization_range[alg8] tests/test_common.py::test_normalization_range[alg9] [gw6] [ 16%] PASSED tests/test_common.py::test_normalization_monotonic[alg7] tests/test_common.py::test_normalization_monotonic[alg8] [gw5] [ 16%] FAILED tests/test_common.py::test_normalization_same[alg21] [gw1] [ 17%] PASSED tests/test_common.py::test_normalization_range[alg20] tests/test_common.py::test_normalization_range[alg21] tests/test_common.py::test_normalization_same[alg22] [gw4] [ 17%] PASSED tests/test_common.py::test_normalization_same[alg10] tests/test_common.py::test_normalization_same[alg11] [gw7] [ 17%] PASSED tests/test_common.py::test_normalization_monotonic[alg19] tests/test_common.py::test_normalization_monotonic[alg20] [gw2] [ 17%] PASSED tests/test_common.py::test_normalization_by_one[alg9] tests/test_common.py::test_normalization_by_one[alg10] [gw5] [ 18%] PASSED tests/test_common.py::test_normalization_same[alg22] tests/test_common.py::test_normalization_same[alg23] [gw3] [ 18%] PASSED tests/test_common.py::test_normalization_by_one[alg21] tests/test_common.py::test_normalization_by_one[alg22] [gw4] [ 18%] PASSED tests/test_common.py::test_normalization_same[alg11] tests/test_common.py::test_no_common_chars[alg0] [gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg0] tests/test_common.py::test_no_common_chars[alg1] [gw6] [ 19%] PASSED tests/test_common.py::test_normalization_monotonic[alg8] tests/test_common.py::test_normalization_monotonic[alg9] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg1] tests/test_common.py::test_no_common_chars[alg2] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg2] tests/test_common.py::test_no_common_chars[alg3] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg3] tests/test_common.py::test_no_common_chars[alg4] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg4] tests/test_common.py::test_no_common_chars[alg5] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg5] tests/test_common.py::test_no_common_chars[alg6] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg6] tests/test_common.py::test_no_common_chars[alg7] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg7] tests/test_common.py::test_no_common_chars[alg8] [gw4] [ 21%] PASSED 
tests/test_common.py::test_no_common_chars[alg8] [gw1] [ 21%] FAILED tests/test_common.py::test_normalization_range[alg21] tests/test_common.py::test_normalization_range[alg22] tests/test_common.py::test_no_common_chars[alg9] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg9] tests/test_common.py::test_no_common_chars[alg10] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg10] tests/test_common.py::test_no_common_chars[alg11] [gw0] [ 22%] PASSED tests/test_common.py::test_normalization_range[alg9] tests/test_common.py::test_normalization_range[alg10] [gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg11] tests/test_common.py::test_no_common_chars[alg12] [gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg12] tests/test_common.py::test_no_common_chars[alg13] [gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg13] tests/test_common.py::test_no_common_chars[alg14] [gw4] [ 23%] PASSED tests/test_common.py::test_no_common_chars[alg14] tests/test_common.py::test_no_common_chars[alg15] [gw4] [ 23%] PASSED tests/test_common.py::test_no_common_chars[alg15] tests/test_common.py::test_no_common_chars[alg16] [gw4] [ 23%] PASSED tests/test_common.py::test_no_common_chars[alg16] tests/test_common.py::test_no_common_chars[alg17] [gw4] [ 23%] PASSED tests/test_common.py::test_no_common_chars[alg17] tests/test_common.py::test_empty[alg11] [gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg11] tests/test_common.py::test_empty[alg12] [gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg12] tests/test_common.py::test_empty[alg13] [gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg13] tests/test_common.py::test_empty[alg14] [gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg14] [gw5] [ 25%] PASSED tests/test_common.py::test_normalization_same[alg23] tests/test_common.py::test_empty[alg15] [gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg15] tests/test_common.py::test_no_common_chars[alg18] tests/test_common.py::test_empty[alg16] [gw5] [ 25%] PASSED tests/test_common.py::test_no_common_chars[alg18] [gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg16] tests/test_common.py::test_empty[alg17] tests/test_common.py::test_no_common_chars[alg19] [gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg17] [gw7] [ 26%] PASSED tests/test_common.py::test_normalization_monotonic[alg20] [gw5] [ 26%] PASSED tests/test_common.py::test_no_common_chars[alg19] tests/test_common.py::test_empty[alg18] tests/test_common.py::test_normalization_monotonic[alg21] [gw4] [ 26%] PASSED tests/test_common.py::test_empty[alg18] tests/test_common.py::test_no_common_chars[alg20] tests/test_common.py::test_empty[alg19] [gw5] [ 26%] PASSED tests/test_common.py::test_no_common_chars[alg20] [gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg19] tests/test_common.py::test_empty[alg20] tests/test_common.py::test_no_common_chars[alg21] [gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg20] [gw5] [ 27%] PASSED tests/test_common.py::test_no_common_chars[alg21] tests/test_common.py::test_empty[alg21] tests/test_common.py::test_no_common_chars[alg22] [gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg21] tests/test_common.py::test_empty[alg22] [gw5] [ 28%] PASSED tests/test_common.py::test_no_common_chars[alg22] [gw4] [ 28%] PASSED tests/test_common.py::test_empty[alg22] tests/test_common.py::test_no_common_chars[alg23] tests/test_common.py::test_empty[alg23] [gw4] [ 28%] PASSED tests/test_common.py::test_empty[alg23] 
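
The test_common.py parametrisations running here are property checks applied across all algorithms: judging by the names, test_normalization_range pins normalized values into [0, 1] and test_normalization_by_one ties normalized distance and similarity to a sum of one. An illustrative check of those invariants through textdistance's public normalized_distance/normalized_similarity API (the suite's exact assertions may differ):

    # Illustrative check of the normalization invariants the test names
    # suggest; every textdistance algorithm instance exposes distance and
    # similarity in both raw and normalized form.
    import textdistance

    alg = textdistance.hamming
    a, b = "test", "text"

    nd = alg.normalized_distance(a, b)    # 0.25: one differing position of four
    ns = alg.normalized_similarity(a, b)  # 0.75

    assert 0.0 <= nd <= 1.0               # test_normalization_range-style bound
    assert abs(nd + ns - 1.0) < 1e-9      # test_normalization_by_one-style tie
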
tests/test_common.py::test_unequal_distance[alg0] [gw4] [ 28%] PASSED tests/test_common.py::test_unequal_distance[alg0] [gw5] [ 29%] PASSED tests/test_common.py::test_no_common_chars[alg23] tests/test_common.py::test_unequal_distance[alg1] tests/test_common.py::test_empty[alg0] [gw4] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg1] tests/test_common.py::test_unequal_distance[alg2] [gw5] [ 29%] PASSED tests/test_common.py::test_empty[alg0] tests/test_common.py::test_empty[alg1] [gw4] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg2] [gw5] [ 30%] PASSED tests/test_common.py::test_empty[alg1] tests/test_common.py::test_empty[alg2] tests/test_common.py::test_unequal_distance[alg3] [gw5] [ 30%] PASSED tests/test_common.py::test_empty[alg2] [gw4] [ 30%] PASSED tests/test_common.py::test_unequal_distance[alg3] tests/test_common.py::test_empty[alg3] tests/test_common.py::test_unequal_distance[alg4] [gw5] [ 30%] PASSED tests/test_common.py::test_empty[alg3] [gw4] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg4] tests/test_common.py::test_empty[alg4] tests/test_common.py::test_unequal_distance[alg5] [gw5] [ 31%] PASSED tests/test_common.py::test_empty[alg4] [gw4] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg5] tests/test_common.py::test_empty[alg5] tests/test_common.py::test_unequal_distance[alg6] [gw5] [ 31%] PASSED tests/test_common.py::test_empty[alg5] [gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg6] tests/test_common.py::test_empty[alg6] tests/test_common.py::test_unequal_distance[alg7] [gw5] [ 32%] PASSED tests/test_common.py::test_empty[alg6] [gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg7] tests/test_common.py::test_unequal_distance[alg8] tests/test_common.py::test_empty[alg7] [gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg8] [gw5] [ 33%] PASSED tests/test_common.py::test_empty[alg7] tests/test_common.py::test_unequal_distance[alg9] [gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg9] tests/test_common.py::test_unequal_distance[alg10] tests/test_common.py::test_empty[alg8] [gw5] [ 33%] PASSED tests/test_common.py::test_empty[alg8] [gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg10] tests/test_common.py::test_empty[alg9] tests/test_common.py::test_unequal_distance[alg11] [gw5] [ 33%] PASSED tests/test_common.py::test_empty[alg9] tests/test_common.py::test_empty[alg10] [gw5] [ 34%] PASSED tests/test_common.py::test_empty[alg10] [gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg11] tests/test_common.py::test_unequal_distance[alg17] tests/test_common.py::test_unequal_distance[alg12] [gw5] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg17] [gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg12] [gw3] [ 35%] PASSED tests/test_common.py::test_normalization_by_one[alg22] tests/test_common.py::test_unequal_distance[alg13] tests/test_common.py::test_unequal_distance[alg18] [gw4] [ 35%] PASSED tests/test_common.py::test_unequal_distance[alg13] tests/test_common.py::test_normalization_by_one[alg23] [gw5] [ 35%] PASSED tests/test_common.py::test_unequal_distance[alg18] tests/test_common.py::test_unequal_distance[alg14] tests/test_common.py::test_unequal_distance[alg19] [gw5] [ 35%] PASSED tests/test_common.py::test_unequal_distance[alg19] [gw4] [ 36%] PASSED tests/test_common.py::test_unequal_distance[alg14] tests/test_common.py::test_unequal_distance[alg20] 
tests/test_common.py::test_unequal_distance[alg15] [gw5] [ 36%] PASSED tests/test_common.py::test_unequal_distance[alg20] tests/test_common.py::test_unequal_distance[alg21] [gw4] [ 36%] PASSED tests/test_common.py::test_unequal_distance[alg15] [gw5] [ 36%] PASSED tests/test_common.py::test_unequal_distance[alg21] tests/test_common.py::test_unequal_distance[alg16] tests/test_common.py::test_unequal_distance[alg22] [gw5] [ 37%] PASSED tests/test_common.py::test_unequal_distance[alg22] [gw4] [ 37%] PASSED tests/test_common.py::test_unequal_distance[alg16] tests/test_common.py::test_unequal_distance[alg23] [gw5] [ 37%] PASSED tests/test_common.py::test_unequal_distance[alg23] tests/test_external.py::test_qval[None-Jaro] tests/test_external.py::test_compare[Jaro] [gw2] [ 37%] FAILED tests/test_common.py::test_normalization_by_one[alg10] tests/test_common.py::test_normalization_by_one[alg11] [gw0] [ 38%] PASSED tests/test_common.py::test_normalization_range[alg10] tests/test_common.py::test_normalization_range[alg11] [gw6] [ 38%] PASSED tests/test_common.py::test_normalization_monotonic[alg9] tests/test_common.py::test_normalization_monotonic[alg10] [gw7] [ 38%] PASSED tests/test_common.py::test_normalization_monotonic[alg21] tests/test_common.py::test_normalization_monotonic[alg22] [gw1] [ 38%] FAILED tests/test_common.py::test_normalization_range[alg22] tests/test_common.py::test_normalization_range[alg23] [gw2] [ 39%] PASSED tests/test_common.py::test_normalization_by_one[alg11] tests/test_compression/test_common.py::test_simmetry[alg5] [gw4] [ 39%] PASSED tests/test_external.py::test_qval[None-Jaro] tests/test_external.py::test_qval[None-JaroWinkler] [gw5] [ 39%] PASSED tests/test_external.py::test_compare[Jaro] tests/test_external.py::test_compare[JaroWinkler] [gw0] [ 39%] PASSED tests/test_common.py::test_normalization_range[alg11] tests/test_compression/test_common.py::test_normalized_by_one[alg3] [gw6] [ 40%] PASSED tests/test_common.py::test_normalization_monotonic[alg10] tests/test_common.py::test_normalization_monotonic[alg11] [gw1] [ 40%] FAILED tests/test_common.py::test_normalization_range[alg23] tests/test_compression/test_entropy_ncd.py::test_normalization_range [gw3] [ 40%] FAILED tests/test_common.py::test_normalization_by_one[alg23] tests/test_external.py::test_list_of_numbers[JaroWinkler] [gw7] [ 40%] PASSED tests/test_common.py::test_normalization_monotonic[alg22] tests/test_common.py::test_normalization_monotonic[alg23] [gw5] [ 41%] PASSED tests/test_external.py::test_compare[JaroWinkler] tests/test_compression/test_common.py::test_monotonicity[alg4] [gw5] [ 41%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg4] tests/test_compression/test_common.py::test_monotonicity[alg5] [gw2] [ 41%] PASSED tests/test_compression/test_common.py::test_simmetry[alg5] [gw5] [ 41%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg5] tests/test_compression/test_common.py::test_simmetry[alg6] tests/test_compression/test_common.py::test_monotonicity[alg6] [gw5] [ 41%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg6] tests/test_compression/test_common.py::test_simmetry[alg0] [gw0] [ 42%] PASSED tests/test_compression/test_common.py::test_normalized_by_one[alg3] tests/test_compression/test_common.py::test_normalized_by_one[alg4] [gw3] [ 42%] PASSED tests/test_external.py::test_list_of_numbers[JaroWinkler] tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] [gw3] [ 42%] PASSED 
tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] [gw3] [ 42%] PASSED tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] tests/test_compression/test_arith_ncd.py::test_make_probs [gw3] [ 43%] PASSED tests/test_compression/test_arith_ncd.py::test_make_probs tests/test_compression/test_arith_ncd.py::test_arith_output [gw3] [ 43%] PASSED tests/test_compression/test_arith_ncd.py::test_arith_output tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] [gw3] [ 43%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] [gw3] [ 43%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] [gw3] [ 44%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] [gw3] [ 44%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] tests/test_compression/test_common.py::test_monotonicity[alg0] [gw3] [ 44%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg0] tests/test_compression/test_common.py::test_monotonicity[alg1] [gw3] [ 44%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg1] tests/test_compression/test_common.py::test_monotonicity[alg2] [gw6] [ 45%] PASSED tests/test_common.py::test_normalization_monotonic[alg11] [gw3] [ 45%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg2] tests/test_compression/test_common.py::test_monotonicity[alg3] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4] [gw3] [ 45%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg3] [gw6] [ 45%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2] [gw4] [ 46%] FAILED tests/test_external.py::test_qval[None-JaroWinkler] tests/test_external.py::test_qval[1-Jaro] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1] [gw3] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1] [gw1] [ 47%] FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503] [gw1] [ 47%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1] tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1] [gw3] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1] [gw1] [ 48%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1] 
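
The test_compression cases exercise the normalized compression distances (arith_ncd, bwtrle_ncd, bz2_ncd, sqrt_ncd, entropy_ncd), which follow the standard NCD construction: compress each input and their concatenation, then compare the sizes. A standalone sketch using the stdlib bz2 compressor; this is illustrative only, since textdistance's implementations differ in detail and compressor header overhead dominates on inputs this short:

    import bz2

    def ncd(x: bytes, y: bytes) -> float:
        # Normalized Compression Distance:
        #   NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y))
        # where C(s) is the compressed size of s.
        cx, cy = len(bz2.compress(x)), len(bz2.compress(y))
        cxy = len(bz2.compress(x + y))
        return (cxy - min(cx, cy)) / max(cx, cy)

    print(ncd(b"test", b"test"), ncd(b"test", b"nani"))
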
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1]
tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3]
[gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1]
[gw3] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3]
[gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7]
[gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7]
[gw3] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2]
[gw3] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1]
[gw3] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1]
[gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1]
[gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3]
[gw3] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3]
[gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3]
tests/test_edit/test_editex.py::test_distance[nelson--12]
[gw6] [ 50%] PASSED tests/test_edit/test_editex.py::test_distance[nelson--12]
tests/test_edit/test_editex.py::test_distance[-neilsen-14]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1]
[gw3] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2]
[gw3] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2]
[gw6] [ 51%] PASSED tests/test_edit/test_editex.py::test_distance[-neilsen-14]
tests/test_edit/test_editex.py::test_distance[ab-a-2]
[gw6] [ 51%] PASSED tests/test_edit/test_editex.py::test_distance[ab-a-2]
tests/test_edit/test_editex.py::test_distance[ab-c-4]
[gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[ab-c-4]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2]
tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1]
[gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1]
[gw3] [ 52%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3]
[gw0] [ 52%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg4]
[gw3] [ 53%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3]
tests/test_edit/test_editex.py::test_distance[--0]
[gw3] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance[--0]
tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0]
[gw3] [ 53%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0]
tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0]
[gw3] [ 53%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0]
tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5]
[gw3] [ 54%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5]
tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1]
[gw3] [ 54%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1]
tests/test_compression/test_common.py::test_normalized_by_one[alg5]
tests/test_edit/test_editex.py::test_distance[-MARTHA-12]
[gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance[-MARTHA-12]
tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12]
[gw3] [ 54%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
tests/test_edit/test_hamming.py::test_distance[test-text-1]
[gw6] [ 55%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24]
[gw3] [ 55%] PASSED tests/test_edit/test_hamming.py::test_distance[test-text-1]
tests/test_edit/test_hamming.py::test_distance[test-tset-2]
[gw3] [ 55%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tset-2]
tests/test_edit/test_hamming.py::test_distance[test-qwe-4]
[gw6] [ 55%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3]
[gw3] [ 56%] PASSED tests/test_edit/test_hamming.py::test_distance[test-qwe-4]
tests/test_edit/test_hamming.py::test_distance[test-testit-2]
[gw3] [ 56%] PASSED tests/test_edit/test_hamming.py::test_distance[test-testit-2]
[gw6] [ 56%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3]
tests/test_edit/test_hamming.py::test_distance[test-tesst-2]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4]
[gw3] [ 56%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tesst-2]
tests/test_edit/test_hamming.py::test_distance[test-tet-2]
[gw3] [ 57%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tet-2]
tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334]
[gw6] [ 57%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4]
[gw3] [ 57%] PASSED tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334]
tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5]
tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0]
[gw3] [ 57%] PASSED tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0]
tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666]
[gw6] [ 58%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5]
[gw3] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666]
tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334]
tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444]
[gw3] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334]
[gw6] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444]
tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925]
tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222]
[gw3] [ 58%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925]
tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
[gw6] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222]
[gw3] [ 59%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666]
[gw6] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666]
tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683]
tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84]
[gw6] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683]
tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665]
[gw6] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665]
[gw3] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84]
tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0]
tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332]
[gw6] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0]
tests/test_edit/test_levenshtein.py::test_distance[test-tset-2]
[gw3] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332]
[gw2] [ 61%] PASSED tests/test_compression/test_common.py::test_simmetry[alg6]
[gw6] [ 61%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tset-2]
tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272]
tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4]
tests/test_compression/test_common.py::test_is_normalized[alg0]
[gw3] [ 61%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272]
[gw6] [ 61%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4]
tests/test_edit/test_levenshtein.py::test_distance[test-text-1]
tests/test_edit/test_levenshtein.py::test_distance[test-testit-2]
[gw3] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-text-1]
[gw6] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-testit-2]
tests/test_edit/test_levenshtein.py::test_distance[test-tet-1]
[gw3] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tet-1]
tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1]
[gw6] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1]
tests/test_edit/test_matrix.py::test_distance[--1]
[gw3] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[--1]
tests/test_edit/test_matrix.py::test_distance[-a-0]
tests/test_edit/test_matrix.py::test_distance[A-C--3]
[gw3] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[-a-0]
[gw6] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[A-C--3]
tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1]
[gw3] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1]
tests/test_edit/test_matrix.py::test_distance[G-G-7]
tests/test_edit/test_matrix.py::test_distance[T-C-0]
[gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[G-G-7]
tests/test_edit/test_matrix.py::test_distance[A-A-10]
[gw3] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[T-C-0]
tests/test_edit/test_matrix.py::test_distance[A-G--1]
[gw1] [ 64%] PASSED tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor
[gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[A-A-10]
[gw3] [ 65%] PASSED tests/test_edit/test_matrix.py::test_distance[A-G--1]
tests/test_edit/test_matrix.py::test_distance[C-T-0]
tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor
tests/test_edit/test_matrix.py::test_distance[T-A--4]
[gw7] [ 65%] PASSED tests/test_common.py::test_normalization_monotonic[alg23]
[gw3] [ 65%] PASSED tests/test_edit/test_matrix.py::test_distance[C-T-0]
[gw6] [ 65%] PASSED tests/test_edit/test_matrix.py::test_distance[T-A--4]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1]
[gw7] [ 66%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1]
tests/test_edit/test_mlipns.py::test_distance[--1]
tests/test_edit/test_mlipns.py::test_distance[a--0]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2]
[gw3] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[--1]
[gw7] [ 66%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3]
tests/test_edit/test_mlipns.py::test_distance[abc-abc-1]
[gw6] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[a--0]
[gw7] [ 66%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3]
[gw3] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abc-1]
tests/test_edit/test_mlipns.py::test_distance[-a-0]
tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1]
[gw6] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[-a-0]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4]
[gw3] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1]
tests/test_edit/test_mlipns.py::test_distance[a-a-1]
tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1]
[gw7] [ 67%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4]
[gw6] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[a-a-1]
[gw3] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1]
tests/test_edit/test_mlipns.py::test_distance[ab-a-1]
[gw6] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[ab-a-1]
tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0]
[gw3] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0]
[gw7] [ 69%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1]
tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1]
[gw6] [ 69%] PASSED tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1]
[gw3] [ 69%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5]
tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1]
[gw7] [ 69%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4]
[gw6] [ 70%] PASSED tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1]
[gw3] [ 70%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7]
[gw7] [ 70%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4]
tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2]
[gw6] [ 70%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
[gw7] [ 71%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0]
[gw6] [ 71%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0]
[gw3] [ 71%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1]
tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0]
tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26]
[gw6] [ 71%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0]
tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0]
[gw6] [ 72%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0]
[gw7] [ 72%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1]
[gw3] [ 72%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26]
tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333]
tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1]
tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0]
[gw6] [ 72%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1]
[gw3] [ 73%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333]
tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666]
tests/test_phonetic/test_editex.py::test_distance[nelson--12]
[gw7] [ 73%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0]
tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
[gw3] [ 73%] PASSED tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666]
[gw6] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson--12]
tests/test_phonetic/test_editex.py::test_distance[-neilsen-14]
[gw7] [ 74%] PASSED tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
[gw6] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[-neilsen-14]
tests/test_phonetic/test_editex.py::test_distance[--0]
[gw3] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[--0]
[gw0] [ 74%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg5]
tests/test_phonetic/test_editex.py::test_distance[ab-c-4]
tests/test_phonetic/test_editex.py::test_distance[ab-a-2]
tests/test_compression/test_common.py::test_normalized_by_one[alg6]
[gw3] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-c-4]
tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2]
[gw6] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-a-2]
tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873]
[gw3] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2]
[gw7] [ 75%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873]
tests/test_phonetic/test_editex.py::test_distance[niall-neal-1]
tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2]
[gw6] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-neal-1]
tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2]
[gw3] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2]
tests/test_phonetic/test_editex.py::test_distance[neal-niall-1]
tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3]
[gw6] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-niall-1]
[gw7] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2]
[gw3] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3]
tests/test_phonetic/test_editex.py::test_distance[cat-hat-2]
tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3]
[gw3] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3]
tests/test_phonetic/test_editex.py::test_local[--0]
[gw6] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[cat-hat-2]
tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2]
[gw3] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_local[--0]
tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2]
[gw7] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2]
tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12]
[gw7] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12]
[gw6] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2]
tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6]
tests/test_phonetic/test_editex.py::test_local[nelson--12]
tests/test_phonetic/test_editex.py::test_local[-neilsen-14]
[gw3] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson--12]
[gw6] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[-neilsen-14]
tests/test_phonetic/test_editex.py::test_local[ab-c-2]
[gw7] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6]
[gw3] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-c-2]
tests/test_phonetic/test_editex.py::test_local[ab-a-2]
tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2]
tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2]
[gw6] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-a-2]
[gw7] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2]
tests/test_phonetic/test_editex.py::test_local[niall-neal-1]
tests/test_phonetic/test_editex.py::test_local[neal-niall-1]
[gw3] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2]
tests/test_phonetic/test_editex.py::test_local[niall-nihal-2]
[gw6] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-neal-1]
[gw7] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-niall-1]
tests/test_phonetic/test_editex.py::test_local[nihal-niall-2]
[gw3] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-nihal-2]
tests/test_phonetic/test_editex.py::test_local[neal-nihl-3]
[gw6] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[nihal-niall-2]
[gw7] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-nihl-3]
tests/test_phonetic/test_editex.py::test_local[nihl-neal-3]
tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-]
[gw3] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[nihl-neal-3]
tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd]
[gw6] [ 81%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-]
[gw7] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd]
tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet]
[gw3] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet]
tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest]
[gw4] [ 82%] PASSED tests/test_external.py::test_qval[1-Jaro]
tests/test_external.py::test_qval[1-JaroWinkler]
tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION]
[gw6] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest]
tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia]
[gw7] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION]
tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-]
[gw3] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia]
tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-]
[gw3] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-]
tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab]
[gw3] [ 83%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab]
[gw5] [ 83%] FAILED tests/test_compression/test_common.py::test_simmetry[alg0]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab]
tests/test_compression/test_common.py::test_simmetry[alg1]
[gw6] [ 84%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
[gw3] [ 84%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab]
tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a]
[gw6] [ 84%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc]
tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc]
[gw6] [ 84%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1]
[gw6] [ 85%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1]
[gw3] [ 85%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc]
[gw7] [ 85%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-]
tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0]
[gw3] [ 85%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0]
tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet]
tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-]
[gw6] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-]
[gw3] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-]
tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST]
[gw6] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST]
tests/test_token/test_bag.py::test_distance[qwe-qwe-0]
tests/test_token/test_bag.py::test_distance[qwe-erty-3]
[gw6] [ 86%] PASSED tests/test_token/test_bag.py::test_distance[qwe-erty-3]
tests/test_token/test_bag.py::test_distance[qwe-rtys-4]
[gw6] [ 87%] PASSED tests/test_token/test_bag.py::test_distance[qwe-rtys-4]
tests/test_token/test_cosine.py::test_distance[test-text-0.75]
[gw6] [ 87%] PASSED tests/test_token/test_cosine.py::test_distance[test-text-0.75]
tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595]
[gw6] [ 87%] PASSED tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595]
tests/test_token/test_jaccard.py::test_distance[test-text-0.6]
[gw6] [ 87%] PASSED tests/test_token/test_jaccard.py::test_distance[test-text-0.6]
tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625]
[gw6] [ 88%] PASSED tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625]
tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333]
[gw7] [ 88%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet]
[gw6] [ 88%] PASSED tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333]
[gw3] [ 88%] PASSED tests/test_token/test_bag.py::test_distance[qwe-qwe-0]
[gw1] [ 89%] PASSED tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor
tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST]
tests/test_token/test_jaccard.py::test_compare_with_tversky
tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor
tests/test_token/test_bag.py::test_distance[qwe-ewq-0]
[gw7] [ 89%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST]
[gw3] [ 89%] PASSED tests/test_token/test_bag.py::test_distance[qwe-ewq-0]
tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805]
tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667]
[gw7] [ 89%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805]
[gw3] [ 90%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667]
tests/test_token/test_overlap.py::test_distance[test-text-0.75]
tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666]
[gw7] [ 90%] PASSED tests/test_token/test_overlap.py::test_distance[test-text-0.75]
[gw3] [ 90%] PASSED tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666]
tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334]
[gw7] [ 90%] PASSED tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334]
tests/test_token/test_sorensen.py::test_compare_with_tversky
tests/test_token/test_sorensen.py::test_distance[test-text-0.75]
[gw3] [ 91%] PASSED tests/test_token/test_sorensen.py::test_distance[test-text-0.75]
tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set
[gw5] [ 91%] FAILED tests/test_compression/test_common.py::test_simmetry[alg1]
tests/test_compression/test_common.py::test_simmetry[alg2]
[gw2] [ 91%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg0]
tests/test_compression/test_common.py::test_is_normalized[alg1]
[gw0] [ 91%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6]
tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1]
[gw0] [ 91%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1]
tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0]
[gw0] [ 92%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0]
tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6]
[gw4] [ 92%] FAILED tests/test_external.py::test_qval[1-JaroWinkler]
[gw0] [ 92%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6]
tests/test_external.py::test_qval[2-Jaro]
tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor
[gw1] [ 92%] PASSED tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor
tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor
[gw3] [ 93%] PASSED tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set
[gw7] [ 93%] PASSED tests/test_token/test_sorensen.py::test_compare_with_tversky
[gw6] [ 93%] PASSED tests/test_token/test_jaccard.py::test_compare_with_tversky
tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set
[gw5] [ 93%] FAILED tests/test_compression/test_common.py::test_simmetry[alg2]
[gw0] [ 94%] PASSED tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor
tests/test_compression/test_common.py::test_simmetry[alg3]
tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor
[gw2] [ 94%] PASSED tests/test_compression/test_common.py::test_is_normalized[alg1]
tests/test_compression/test_common.py::test_is_normalized[alg2]
[gw4] [ 94%] PASSED tests/test_external.py::test_qval[2-Jaro]
tests/test_external.py::test_qval[2-JaroWinkler]
[gw1] [ 94%] PASSED tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor
tests/test_compression/test_sqrt_ncd.py::test_normalization_range
[gw0] [ 95%] PASSED tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor
tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor
[gw6] [ 95%] PASSED tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set
[gw5] [ 95%] PASSED tests/test_compression/test_common.py::test_simmetry[alg3]
tests/test_compression/test_common.py::test_simmetry[alg4]
[gw2] [ 95%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg2]
tests/test_compression/test_common.py::test_is_normalized[alg3]
[gw4] [ 96%] PASSED tests/test_external.py::test_qval[2-JaroWinkler]
tests/test_external.py::test_qval[3-Jaro]
[gw1] [ 96%] PASSED tests/test_compression/test_sqrt_ncd.py::test_normalization_range
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1]
[gw1] [ 96%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1]
[gw1] [ 96%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1]
[gw0] [ 97%] PASSED tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor
tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor
[gw2] [ 97%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg3]
tests/test_compression/test_common.py::test_is_normalized[alg4]
[gw5] [ 97%] PASSED tests/test_compression/test_common.py::test_simmetry[alg4]
[gw4] [ 97%] PASSED
tests/test_external.py::test_qval[3-Jaro] tests/test_external.py::test_qval[3-JaroWinkler] [gw0] [ 98%] PASSED tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor [gw2] [ 98%] PASSED tests/test_compression/test_common.py::test_is_normalized[alg4] tests/test_compression/test_common.py::test_is_normalized[alg5] [gw2] [ 98%] PASSED tests/test_compression/test_common.py::test_is_normalized[alg5] tests/test_compression/test_common.py::test_is_normalized[alg6] [gw4] [ 98%] PASSED tests/test_external.py::test_qval[3-JaroWinkler] tests/test_external.py::test_list_of_numbers[Jaro] [gw2] [ 99%] PASSED tests/test_compression/test_common.py::test_is_normalized[alg6] tests/test_compression/test_common.py::test_normalized_by_one[alg0] [gw4] [ 99%] PASSED tests/test_external.py::test_list_of_numbers[Jaro] [gw2] [ 99%] PASSED tests/test_compression/test_common.py::test_normalized_by_one[alg0] tests/test_compression/test_common.py::test_normalized_by_one[alg1] [gw2] [ 99%] PASSED tests/test_compression/test_common.py::test_normalized_by_one[alg1] tests/test_compression/test_common.py::test_normalized_by_one[alg2] [gw2] [100%] PASSED tests/test_compression/test_common.py::test_normalized_by_one[alg2] =================================== FAILURES =================================== ________________________ test_normalization_range[alg0] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 16 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 16 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) function = .run at 0xffffffb04e8e00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 16 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'á\x05', right = 'á\x05', alg = Bag({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('á\x05', 'á\x05', Bag({'qval': 1, 'external': True})), kwargs = {} initial_draws = 2, start = 4884314.328458316, result = None finish = 4884314.599103264, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=270645) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 270.65ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Bag({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 16 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 270.65ms, which exceeds the deadline of 200.00ms'), "args = ('á\\x05', 'á\\x05', Bag({'qv...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='á\x05', right='á\x05', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Bag({'qval': 1, 'external': True}), E left='á\x05', E right='á\x05', E ) E Unreliable test timings! On an initial run, this test took 270.65ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.01 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg1] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 98 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 98 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 98 bytes, frozen) function = .run at 0xffffffb0391120> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 98 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = "§ÓA'ÎLÌ\x14\x16", right = '\U000af845Uk=Ä\U000964d2\x13Øv' alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ("§ÓA'ÎLÌ\x14\x16", '\U000af845Uk=Ä\U000964d2\x13Øv', Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 4884323.619661141, result = None finish = 4884325.510795647, internal_draw_time = 0 runtime = datetime.timedelta(seconds=1, microseconds=891135) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 1891.13ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 98 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 1891.13ms, which exceeds the deadline of 200.00ms'), 'args = ("§ÓA\'ÎLÌ\\x14\\x16", \'\\U...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left="§ÓA'ÎLÌ\x14\x16", right='\U000af845Uk=Ä\U000964d2\x13Øv', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}), E left="§ÓA'ÎLÌ\x14\x16", E right='\U000af845Uk=Ä\U000964d2\x13Øv', E ) E Unreliable test timings! On an initial run, this test took 1891.13ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.55 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg1] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 60 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 60 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 60 bytes, frozen) function = .run at 0xffffff67308180> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 60 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '𫟗\U000b2019\U0004e310\x12', right = ',Vép' alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('𫟗\U000b2019\U0004e310\x12', ',Vép', Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 4884323.718983229, result = None finish = 4884325.36753781, internal_draw_time = 0 runtime = datetime.timedelta(seconds=1, microseconds=648555) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 1648.56ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 60 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 1648.56ms, which exceeds the deadline of 200.00ms'), "args = ('𫟗\\U000b2019\\U0004e310\\x...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='𫟗\U000b2019\U0004e310\x12', right=',Vép', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}), E left='𫟗\U000b2019\U0004e310\x12', E right=',Vép', E ) E Unreliable test timings! On an initial run, this test took 1648.56ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.52 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg2] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 61 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 61 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 61 bytes, frozen) function = .run at 0xffffffb03e1d00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 61 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'h', right = 'U\U000fcf5a \U000c85f9?\xa0\x1a\x9a' alg = Levenshtein({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('h', 'U\U000fcf5a \U000c85f9?\xa0\x1a\x9a', Levenshtein({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 4884331.017490142, result = None finish = 4884331.398383564, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=380893) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 380.89ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Levenshtein({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 61 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 380.89ms, which exceeds the deadline of 200.00ms'), "args = ('h', 'U\\U000fcf5a \\U000c85...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='h', right='U\U000fcf5a \U000c85f9?\xa0\x1a\x9a', alg=Levenshtein({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Levenshtein({'qval': 1, 'test_func': , 'external': True}), E left='h', E right='U\U000fcf5a \U000c85f9?\xa0\x1a\x9a', E ) E Unreliable test timings! On an initial run, this test took 380.89ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.45 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg15] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 43 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 43 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 43 bytes, frozen) function = .run at 0xffffff8ba37920> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 43 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'ùxÕ', right = 'c\x0c ò·>' alg = StrCmp95({'long_strings': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('ùxÕ', 'c\x0c ò·>', StrCmp95({'long_strings': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 4884338.691363233, result = None finish = 4884338.994586739, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=303224) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 303.22ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = StrCmp95({'long_strings': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 43 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 303.22ms, which exceeds the deadline of 200.00ms'), "args = ('ùxÕ', 'c\\x0c ò·>', StrCmp9...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='ùxÕ', right='c\x0c ò·>', alg=StrCmp95({'long_strings': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=StrCmp95({'long_strings': False, 'external': True}), E left='ùxÕ', E right='c\x0c ò·>', E ) E Unreliable test timings! On an initial run, this test took 303.22ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.29 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg16] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 44 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 44 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 44 bytes, frozen) function = .run at 0xffffff8bad2c00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 44 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000b4695\U00064105\U00085ba8', right = 'ã\U000dff53' alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000b4695\U00064105\U00085ba8', 'ã\U000dff53', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func':..._ident at 0xffffffa266d1c0>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 4884345.681280597, result = None finish = 4884347.283588111, internal_draw_time = 0 runtime = datetime.timedelta(seconds=1, microseconds=602308) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 1602.31ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 44 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 1602.31ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000b4695\\U00064105\\U0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") 
example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U000b4695\U00064105\U00085ba8', right='ã\U000dff53', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}), E left='\U000b4695\U00064105\U00085ba8', E right='ã\U000dff53', E ) E Unreliable test timings! On an initial run, this test took 1602.31ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.12 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg16] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 46 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 46 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 46 bytes, frozen) function = .run at 0xffffff6fcfdee0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 46 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")
                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
>                       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '𠥪', right = '»EÊÈ|\x7f\U00019655'
alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': <...>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('𠥪', '»EÊÈ|\x7f\U00019655', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': <...>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 2, start = 4884343.613691291, result = None
finish = 4884344.29028786, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=676597)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 676.60ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': <...>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <...>
data = ConjectureData(VALID, 46 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 676.60ms, which exceeds the deadline of 200.00ms'), "args = ('𠥪', '»EÊÈ|\\x7f\\U00019655'...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent an
        ordinary test failure, or a fatal error, or a control exception. If
        this method returns normally, the test might have passed, or it might
        have placed ``data`` in an unsuccessful state and then swallowed the
        corresponding control exception.
        """
        ...
        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='𠥪', right='»EÊÈ|\x7f\U00019655', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': <...>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': <...>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}),
E           left='𠥪',
E           right='»EÊÈ|\x7f\U00019655',
E       )
E       Unreliable test timings! On an initial run, this test took 676.60ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.32 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
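The diagnostic above points at Hypothesis's deadline machinery, not at a wrong distance value. As a minimal sketch of the fix the report itself suggests, this is how deadline=None could be applied to a property test of this shape; the ALGS list and the assertion body are assumptions for illustration, since the source of tests/test_common.py is not reproduced in this log.

    # Hedged sketch, not the packaged test: opt a slow property test out of
    # Hypothesis's 200 ms deadline, as the report above recommends.
    import hypothesis
    import hypothesis.strategies
    import pytest
    import textdistance

    ALGS = [textdistance.MongeElkan()]  # assumed stand-in for the real list

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(deadline=None)  # no wall-clock limit per example
    @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
    def test_normalization_range(left, right, alg):
        # A normalized distance should always land in [0, 1].
        assert 0 <= alg.normalized_distance(left, right) <= 1
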
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 30 bytes, frozen) function = .run at 0xffffff6744ede0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 30 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = 'ê\x17H\ns' alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', 'ê\x17H\ns', JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) kwargs = {}, initial_draws = 2, start = 4884351.009905879, result = None finish = 4884351.270586548, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=260681) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 260.68ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 30 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 260.68ms, which exceeds the deadline of 200.00ms'), "args = ('', 'ê\\x17H\\ns', JaroWinkl...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='ê\x17H\ns', alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}), E left='', E right='ê\x17H\ns', E ) E Unreliable test timings! On an initial run, this test took 260.68ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.17 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg6] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 32 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 32 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
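The body of test_normalization_by_one is not shown in this log; a plausible reading of the name, offered only as a hedged guess, is that normalized distance and normalized similarity must be complementary. A sketch of that invariant against the falsifying inputs reported above:

    # Hedged guess at the invariant behind the test name; the real assertion
    # at tests/test_common.py:60 is not visible in this log.
    import textdistance

    alg = textdistance.JaroWinkler()
    left, right = '', 'ê\x17H\ns'  # falsifying inputs from the report above
    d = alg.normalized_distance(left, right)
    s = alg.normalized_similarity(left, right)
    assert abs((d + s) - 1.0) < 1e-9  # distance and similarity sum to one
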
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 32 bytes, frozen) function = .run at 0xffffffb0596160> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 32 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = '\U000b98a2Ö²㹛£\x91' alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '\U000b98a2Ö²㹛£\x91', MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) kwargs = {}, initial_draws = 2, start = 4884357.728931394, result = None finish = 4884357.998572533, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=269641) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 269.64ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 32 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 269.64ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\U000b98a2Ö²㹛£\\x91', ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right='\U000b98a2Ö²㹛£\x91', alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}), E left='', E right='\U000b98a2Ö²㹛£\x91', E ) E Unreliable test timings! On an initial run, this test took 269.64ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.18 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg8] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 27 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 27 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
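One detail worth reading off the locals above: the configured deadline is 200 ms, yet current_deadline is timedelta(microseconds=250000). That comes from the (current_deadline // 4) * 5 line in the wrapped test shown in the first traceback, which grants a 25% grace margin on every non-final run; only the final replay enforces the bare 200 ms. A short check of that arithmetic:

    # The 1.25x grace factor from the deadline wrapper shown earlier.
    from datetime import timedelta

    deadline = timedelta(milliseconds=200)  # the configured deadline
    grace = (deadline // 4) * 5             # applied when is_final is False
    assert grace == timedelta(microseconds=250000)
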
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 27 bytes, frozen) function = .run at 0xffffff9a57cfe0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 27 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\U000c6373[×\U000f55bf', alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000c6373[×\U000f55bf', LCSStr({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 1, start = 4884357.031258258, result = None finish = 4884357.39710216, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=365844) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 365.84ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 27 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 365.84ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000c6373[×\\U000f55bf', ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U000c6373[×\U000f55bf', alg=LCSStr({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=LCSStr({'qval': 1, 'external': True}), E text='\U000c6373[×\U000f55bf', E ) E Unreliable test timings! On an initial run, this test took 365.84ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.04 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg9] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 9 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 9 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
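Note that this test draws a single string rather than a pair. What a test named test_normalization_same presumably checks, again as a hedged sketch since the body at tests/test_common.py:71 is not reproduced here, is that a string compared with itself has zero normalized distance:

    # Hedged sketch of the property the test name suggests.
    import textdistance

    alg = textdistance.LCSStr()
    text = '\U000c6373[×\U000f55bf'  # falsifying input from the report above
    assert alg.normalized_distance(text, text) == 0
    assert alg.normalized_similarity(text, text) == 1
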
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 9 bytes, frozen) function = .run at 0xffffff9a471ee0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 9 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\x1c\x05', alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x1c\x05', RatcliffObershelp({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 1, start = 4884369.209917994, result = None finish = 4884369.595368152, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=385450) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 385.45ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 9 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 385.45ms, which exceeds the deadline of 200.00ms'), "args = ('\\x1c\\x05', RatcliffObersh...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\x1c\x05', alg=RatcliffObershelp({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=RatcliffObershelp({'qval': 1, 'external': True}), E text='\x1c\x05', E ) E Unreliable test timings! On an initial run, this test took 385.45ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.18 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg21] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 47 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 47 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
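Once a falsifying input like the one above is known, it can be pinned with Hypothesis's @example decorator so the case is replayed deterministically on every run rather than depending on the example database. A hedged sketch, with the test body assumed as before:

    # Hedged sketch: pin the reported input with @example; deadline=None
    # avoids re-tripping the timing check on a slow first run.
    import hypothesis
    import hypothesis.strategies
    import textdistance

    @hypothesis.settings(deadline=None)
    @hypothesis.example(text='\x1c\x05')  # input from the report above
    @hypothesis.given(text=hypothesis.strategies.text())
    def test_normalization_same(text):
        alg = textdistance.RatcliffObershelp()
        assert alg.normalized_distance(text, text) == 0
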
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 47 bytes, frozen) function = .run at 0xffffff7c7658a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 47 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = 'eÇ\x7fêXÚú\U000ca67c:' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('eÇ\x7fêXÚú\U000ca67c:', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 1, start = 4884369.20561366, result = None finish = 4884369.598018073, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=392404) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 392.40ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 47 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 392.40ms, which exceeds the deadline of 200.00ms'), "args = ('eÇ\\x7fêXÚú\\U000ca67c:', N...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='eÇ\x7fêXÚú\U000ca67c:', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E text='eÇ\x7fêXÚú\U000ca67c:', E ) E Unreliable test timings! On an initial run, this test took 392.40ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.88 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg21] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 95 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 95 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 95 bytes, frozen) function = .run at 0xffffff6f49b560> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 95 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'j\U000b0d00=ò\x99', right = '\x94N\U000e1875\x03\x0b %-Þ\x8f' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('j\U000b0d00=ò\x99', '\x94N\U000e1875\x03\x0b %-Þ\x8f', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 4884379.101477482, result = None finish = 4884379.403387778, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=301910) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 301.91ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 95 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 301.91ms, which exceeds the deadline of 200.00ms'), "args = ('j\\U000b0d00=ò\\x99', '\\x9...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='j\U000b0d00=ò\x99', right='\x94N\U000e1875\x03\x0b %-Þ\x8f', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='j\U000b0d00=ò\x99', E right='\x94N\U000e1875\x03\x0b %-Þ\x8f', E ) E Unreliable test timings! On an initial run, this test took 301.91ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.98 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg10] _______________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 68 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 68 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 68 bytes, frozen) function = .run at 0xffffff66b4bba0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 68 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000fe4b2\x8e𗥞H\U00015c1fêÔ', right = '9Ä\x98\x1fì' alg = Jaccard({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000fe4b2\x8e𗥞H\U00015c1fêÔ', '9Ä\x98\x1fì', Jaccard({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 4884383.257717761, result = None finish = 4884383.602449397, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=344732) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 344.73ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Jaccard({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 68 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 344.73ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000fe4b2\\x8e𗥞H\\U00015c...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U000fe4b2\x8e𗥞H\U00015c1fêÔ', right='9Ä\x98\x1fì', alg=Jaccard({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Jaccard({'qval': 1, 'as_set': False, 'external': True}), E left='\U000fe4b2\x8e𗥞H\U00015c1fêÔ', E right='9Ä\x98\x1fì', E ) E Unreliable test timings! On an initial run, this test took 344.73ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.53 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg22] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 95 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 95 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 95 bytes, frozen) function = .run at 0xffffff6f55b6a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 95 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000ab3b5Á', right = 'çÒæ\'1Ï\U0007a403"v\x12[ó\U000bf4ee\x95&Ð' alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000ab3b5Á', 'çÒæ\'1Ï\U0007a403"v\x12[ó\U000bf4ee\x95&Ð', SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 4884390.881957059, result = None finish = 4884391.203582811, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=321626) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 321.63ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 95 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 321.63ms, which exceeds the deadline of 200.00ms'), 'args = (\'\\U000ab3b5Á\', \'çÒæ\\\'1...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. 
If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U000ab3b5Á', right='çÒæ\'1Ï\U0007a403"v\x12[ó\U000bf4ee\x95&Ð', alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='\U000ab3b5Á', E right='çÒæ\'1Ï\U0007a403"v\x12[ó\U000bf4ee\x95&Ð', E ) E Unreliable test timings! On an initial run, this test took 321.63ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 23.62 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg23] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'T', 'M', 'P', 'I', 'Q', 'X', 'B', 'S', 'K', 'C', 'G', 'R', 'V', 'N', 'Z', 'A', 'F', 'Y', 'L', 'J', 'U', 'O', 'E', 'D'})}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('~`¨\U000bcc33:k\x8ah\x9f:\U00077f67篱&vÙë|o[2\x95I\U0008d6bdY', '\x87ÒÄ7)_ÿ\U00091fa2¯/\x01\x11\U000ea63b\U00087607\x... 
_______________________ test_normalization_range[alg23] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3
alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'T', 'M', 'P', 'I', 'Q', 'X', 'B', 'S', 'K', 'C', 'G', 'R', 'V', 'N', 'Z', 'A', 'F', 'Y', 'L', 'J', 'U', 'O', 'E', 'D'})})
@pytest.mark.parametrize('alg', ALGS)
> @hypothesis.given(
      left=hypothesis.strategies.text(),
      right=hypothesis.strategies.text(),
  )
tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('~`¨\U000bcc33:k\x8ah\x9f:\U00077f67篱&vÙë|o[2\x95I\U0008d6bdY', '\x87ÒÄ7)_ÿ\U00091fa2¯/\x01\x11\U000ea63b\U00087607\x... 'M', 'P', 'I', 'Q', 'X', 'B', 'S', 'K', 'C', 'G', 'R', 'V', 'N', 'Z', 'A', 'F', 'Y', 'L', 'J', 'U', 'O', 'E', 'D'})}))
kwargs = {}, initial_draws = 2, start = 4884399.16196716, result = None
finish = 4884399.614532051, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=452565)
current_deadline = timedelta(milliseconds=200)
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 452.56ms, which exceeds the deadline of 200.00ms
E           Falsifying example: test_normalization_range(
E               alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'T', 'M', 'P', 'I', 'Q', 'X', 'B', 'S', 'K', 'C', 'G', 'R', 'V', 'N', 'Z', 'A', 'F', 'Y', 'L', 'J', 'U', 'O', 'E', 'D'})}),
E               left='~`¨\U000bcc33:k\x8ah\x9f:\U00077f67篱&vÙë|o[2\x95I\U0008d6bdY',
E               right='\x87ÒÄ7)_ÿ\U00091fa2¯/\x01\x11\U000ea63b\U00087607\x12ÕP&𪀟\U000a611d\U0010b9c19',
E           )
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
_______________________ test_normalization_by_one[alg23] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3
self =
data = ConjectureData(INTERESTING, 36 bytes, frozen)
[identical hypothesis frames elided; see the first traceback above]
left = '81', right = '7\U000678bf\U00042083)'
alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'C', 'F', 'T', 'K', 'E', 'L', 'M', 'B', 'P', 'V', 'A', 'Z', 'O', 'I', 'U', 'Q', 'R', 'D', 'S', 'G', 'J', 'N', 'Y', 'X'})})
@pytest.mark.parametrize('alg', ALGS)
> @hypothesis.given(
      left=hypothesis.strategies.text(),
      right=hypothesis.strategies.text(),
  )
tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('81', '7\U000678bf\U00042083)', Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'extern... 'F', 'T', 'K', 'E', 'L', 'M', 'B', 'P', 'V', 'A', 'Z', 'O', 'I', 'U', 'Q', 'R', 'D', 'S', 'G', 'J', 'N', 'Y', 'X'})}))
kwargs = {}, initial_draws = 2, start = 4884390.879677041, result = None
finish = 4884391.197899666, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=318223)
current_deadline = datetime.timedelta(microseconds=250000)
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 318.22ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =
data = ConjectureData(VALID, 36 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 318.22ms, which exceeds the deadline of 200.00ms'), "args = ('81', '7\\U000678bf\\U000420...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E   hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='81', right='7\U000678bf\U00042083)', alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'C', 'F', 'T', 'K', 'E', 'L', 'M', 'B', 'P', 'V', 'A', 'Z', 'O', 'I', 'U', 'Q', 'R', 'D', 'S', 'G', 'J', 'N', 'Y', 'X'})})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_by_one(
E       alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'C', 'F', 'T', 'K', 'E', 'L', 'M', 'B', 'P', 'V', 'A', 'Z', 'O', 'I', 'U', 'Q', 'R', 'D', 'S', 'G', 'J', 'N', 'Y', 'X'})}),
E       left='81',
E       right='7\U000678bf\U00042083)',
E   )
E   Unreliable test timings! On an initial run, this test took 318.22ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.35 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 30 bytes, frozen) function = .run at 0xffffff9a4998a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 30 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '\U000aa86a\U000aa86a\x88', right = '\U000aa86a\U000aa86a\x88' alg = 'JaroWinkler', qval = None @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000aa86a\U000aa86a\x88', '\U000aa86a\U000aa86a\x88', 'JaroWinkler', None) kwargs = {}, initial_draws = 2, start = 4884399.888282723, result = None finish = 4884400.197780279, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=309498) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 309.50ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'JaroWinkler', qval = None @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 30 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 309.50ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000aa86a\\U000aa86a\\x88...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_qval(left='\U000aa86a\U000aa86a\x88', right='\U000aa86a\U000aa86a\x88', alg='JaroWinkler', qval=None) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_qval( E alg='JaroWinkler', E qval=None, E left='\U000aa86a\U000aa86a\x88', E right='\U000aa86a\U000aa86a\x88', E ) E Unreliable test timings! On an initial run, this test took 309.50ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.36 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_normalization_range ___________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 35 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 35 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 35 bytes, frozen) function = .run at 0xffffff6f513e20> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 35 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ text = '\U000bc593uæu/u\xa0' @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_normalization_range(text): tests/test_compression/test_entropy_ncd.py:62: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000bc593uæu/u\xa0',), kwargs = {}, initial_draws = 1 start = 4884404.717995546, result = None, finish = 4884405.00581173 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=287816) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 287.82ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_normalization_range(text): tests/test_compression/test_entropy_ncd.py:62: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 35 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 287.82ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000bc593uæu/u\\xa0',), k...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(text='\U000bc593uæu/u\xa0') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E text='\U000bc593uæu/u\xa0', E ) E Unreliable test timings! On an initial run, this test took 287.82ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.98 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_normalized_by_one[alg4] _________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
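With this many tests tripping the deadline, a per-test change may not scale; Hypothesis also supports settings profiles. A sketch under the assumption that the suite has a conftest.py to hook; the profile name "rpmbuild" is illustrative, not something the spec file defines:

import os
from hypothesis import settings

# Register a no-deadline profile and select it via an environment variable.
settings.register_profile("rpmbuild", deadline=None)
settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))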
_________________________ test_normalized_by_one[alg4] _________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

data = ConjectureData(INTERESTING, 2 bytes, frozen)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
    result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
tests/test_compression/test_common.py:60: in test_normalized_by_one

left = '', right = '', alg = ZLIBNCD({})
args = ('', '', ZLIBNCD({})), kwargs = {}, initial_draws = 2
start = 4884406.400332895, result = None, finish = 4884406.80298529
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=402652)
current_deadline = datetime.timedelta(microseconds=250000)

E   hypothesis.errors.DeadlineExceeded: Test took 402.65ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='', alg=ZLIBNCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalized_by_one(
E       alg=ZLIBNCD({}),
E       left='',
E       right='',
E   )
E   Unreliable test timings! On an initial run, this test took 402.65ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.08 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
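The falsifying input here is a pair of empty strings, which points at builder load rather than the algorithm itself. A sketch re-timing the same pair outside Hypothesis; `textdistance.ZLIBNCD` is the class named in the report, and the printed sum is only a sanity check, not the suite's actual assertion:

import time
import textdistance

# Re-run the falsifying pair from test_normalized_by_one[alg4] directly.
alg = textdistance.ZLIBNCD()
start = time.perf_counter()
d = alg.normalized_distance('', '')
s = alg.normalized_similarity('', '')
elapsed_ms = (time.perf_counter() - start) * 1000
print(d, s, f"{elapsed_ms:.2f} ms")  # far below 200 ms on an idle machine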
_________________________ test_normalized_by_one[alg5] _________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

data = ConjectureData(INTERESTING, 6 bytes, frozen)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
    result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
tests/test_compression/test_common.py:60: in test_normalized_by_one

left = '', right = '0', alg = SqrtNCD({'qval': 1})
args = ('', '0', SqrtNCD({'qval': 1})), kwargs = {}, initial_draws = 2
start = 4884411.01727273, result = None, finish = 4884411.282334333
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=265062)
current_deadline = datetime.timedelta(microseconds=250000)

E   hypothesis.errors.DeadlineExceeded: Test took 265.06ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='0', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalized_by_one(
E       alg=SqrtNCD({'qval': 1}),
E       left='',
E       right='0',
E   )
E   Unreliable test timings! On an initial run, this test took 265.06ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.82 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
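The next failure, test_simmetry, exercises symmetry of the measure, which can be re-checked directly. A hedged sketch using the falsifying pair from the report below; the constructor keywords mirror the params shown in the `ArithNCD({...})` repr, and `.distance` is the generic method textdistance algorithms expose:

import textdistance

# Falsifying pair from test_simmetry[alg0]; symmetry is the property under test.
alg = textdistance.ArithNCD(base=2, terminator=None, qval=1)
left, right = 'äàp\x0c\U0008a57c', '³\x9boà£Ø\x89\t'
print(alg.distance(left, right) == alg.distance(right, left))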
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 62 bytes, frozen) function = .run at 0xffffff7c8fdb20> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 62 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'äàp\x0c\U0008a57c', right = '³\x9boà£Ø\x89\t' alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('äàp\x0c\U0008a57c', '³\x9boà£Ø\x89\t', ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) kwargs = {}, initial_draws = 2, start = 4884405.261561659, result = None finish = 4884405.543157793, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=281596) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 281.60ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 62 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 281.60ms, which exceeds the deadline of 200.00ms'), "args = ('äàp\\x0c\\U0008a57c', '³\\x...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_simmetry(left='äàp\x0c\U0008a57c', right='³\x9boà£Ø\x89\t', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_simmetry(
E           alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}),
E           left='äàp\x0c\U0008a57c',
E           right='³\x9boà£Ø\x89\t',
E       )
E       Unreliable test timings! On an initial run, this test took 281.60ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 88.82 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
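The failure above is a timing flake, not a logic error: the compression-based
metrics are simply slower than Hypothesis' default 200ms deadline on a loaded
builder. A minimal sketch of the workaround the error message itself suggests,
disabling the per-example deadline with the standard hypothesis.settings API
(the test body is illustrative, not the actual textdistance source):

    import hypothesis
    from hypothesis import settings, strategies

    @settings(deadline=None)  # no per-example time limit -> no DeadlineExceeded/Flaky
    @hypothesis.given(left=strategies.text(), right=strategies.text())
    def test_simmetry_no_deadline(left, right):
        ...  # assertion body omitted; it is not shown in this log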
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 23 bytes, frozen) function = .run at 0xffffff7c5f6660> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 23 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '', right = 'åuþ皈', alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', 'åuþ皈', BWTRLENCD({'terminator': '\x00'})), kwargs = {} initial_draws = 2, start = 4884418.438030413, result = None finish = 4884418.805980832, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=367950) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 367.95ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 23 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 367.95ms, which exceeds the deadline of 200.00ms'), "args = ('', 'åuþ皈', BWTRLENCD({'term...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='åuþ皈', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=BWTRLENCD({'terminator': '\x00'}), E left='', E right='åuþ皈', E ) E Unreliable test timings! 
On an initial run, this test took 367.95ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 4.71 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg0] ___________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 44 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 44 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
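Note that current_deadline in the locals above is 250000 microseconds even though
the configured deadline is 200ms: for non-final runs execute_once grants a 25%
grace margin via (current_deadline // 4) * 5, and only the final replay enforces
the strict 200ms. A worked check of that arithmetic in plain Python, using the
same values as this log:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)
    graced = (deadline // 4) * 5      # timedelta floor-division, then scaling
    print(graced)                     # 0:00:00.250000 -- the 250000us seen above

    runtime = datetime.timedelta(microseconds=367950)
    print(runtime >= graced)          # True -> DeadlineExceeded is raised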
___________________________ test_is_normalized[alg0] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

self = <hypothesis.core.StateForActualGivenExecution object at 0x…>
data = ConjectureData(INTERESTING, 44 bytes, frozen)
function = <function …run at 0xffffff674e6340>

    ...  # _execute_once_for_engine (core.py:850) -> execute_once (core.py:789)
    ...  # -> default_new_style_executor (executors.py:47) -> run (core.py:785),
    ...  # all frames identical to the failures above
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '', right = 'õÃ\x0f»µ\x9a^Ùe'
alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', 'õÃ\x0f»µ\x9a^Ùe', ArithNCD({'base': 2, 'terminator': None, 'qval': 1}))
kwargs = {}, initial_draws = 2, start = 4884415.974814367, result = None
finish = 4884416.398113426, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=423299)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...  # timing wrapper identical to the first failure above
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 423.30ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    ...  # final execute_once replay frame, identical to the first failure above
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='õÃ\x0f»µ\x9a^Ùe', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_is_normalized(
E           alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}),
E           left='',
E           right='õÃ\x0f»µ\x9a^Ùe',
E       )
E       Unreliable test timings! On an initial run, this test took 423.30ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 22.61 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
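Every failure in this run has the same shape: a @pytest.mark.parametrize over
algorithm instances stacked on a @hypothesis.given over two text strategies. A
sketch of that pattern (the body and the ALGS list are assumptions -- only the
decorators and parameter names appear in this log):

    import pytest
    import hypothesis
    from hypothesis import strategies

    ALGS = []  # placeholder; the real list holds ArithNCD, BZ2NCD, EntropyNCD, ...

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.given(left=strategies.text(), right=strategies.text())
    def test_simmetry(left, right, alg):
        # a plausible symmetry property given the test name; the real assertion
        # is not visible in this log
        assert alg(left, right) == alg(right, left)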
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) function = .run at 0xffffff9bbf13a0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 5 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = 'j', right = '', alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('j', '', EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})), kwargs = {} initial_draws = 2, start = 4884417.78994457, result = None finish = 4884418.099491226, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=309547) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 309.55ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 309.55ms, which exceeds the deadline of 200.00ms'), "args = ('j', '', EntropyNCD({'qval':...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='j', right='', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}), E left='j', E right='', E ) E Unreliable test timings! On an initial run, this test took 309.55ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.81 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_qval[1-JaroWinkler] ___________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 46 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 46 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
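Decorating every affected test individually does not scale in a package build;
Hypothesis profiles apply the same deadline change suite-wide. A sketch using the
standard settings.register_profile / settings.load_profile API (the profile name
and the conftest.py placement are assumptions, not taken from this log):

    # conftest.py (hypothetical)
    from hypothesis import settings

    settings.register_profile("rpmbuild", deadline=None)  # define once
    settings.load_profile("rpmbuild")                     # activate for the run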
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 46 bytes, frozen) function = .run at 0xffffff9a703ec0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 46 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ left = '¼\U0004627f«\x80ƹ', right = 'Î)', alg = 'JaroWinkler', qval = 1 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('¼\U0004627f«\x80ƹ', 'Î)', 'JaroWinkler', 1), kwargs = {} initial_draws = 2, start = 4884418.429610346, result = None finish = 4884418.795930752, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=366320) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 366.32ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'JaroWinkler', qval = 1 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 46 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 366.32ms, which exceeds the deadline of 200.00ms'), "args = ('¼\\U0004627f«\\x80ƹ', 'Î)'...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_qval(left='¼\U0004627f«\x80ƹ', right='Î)', alg='JaroWinkler', qval=1) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_qval( E alg='JaroWinkler', E qval=1, E left='¼\U0004627f«\x80ƹ', E right='Î)', E ) E Unreliable test timings! On an initial run, this test took 366.32ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.56 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg2] ______________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
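A falsifying example reported here can also be pinned so that it is replayed on
every future run, independent of random generation. A sketch with the real
hypothesis.example decorator, reusing the inputs from the test_qval report above
(the test body is illustrative):

    import hypothesis
    from hypothesis import strategies

    @hypothesis.given(left=strategies.text(), right=strategies.text())
    @hypothesis.example(left='¼\U0004627f«\x80ƹ', right='Î)')  # always retried
    def test_qval_regression(left, right):
        ...  # comparison against the external library is not shown in this log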
_____________________________ test_simmetry[alg2] ______________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

self = <hypothesis.core.StateForActualGivenExecution object at 0x…>
data = ConjectureData(INTERESTING, 2 bytes, frozen)
function = <function …run at 0xffffff7c5f7e20>

    ...  # _execute_once_for_engine (core.py:850) -> execute_once (core.py:789)
    ...  # -> default_new_style_executor (executors.py:47) -> run (core.py:785),
    ...  # all frames identical to the failures above
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '', right = '', alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '', BZ2NCD({})), kwargs = {}, initial_draws = 2
start = 4884422.149608163, result = None, finish = 4884422.410605534
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=260997)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...  # timing wrapper identical to the first failure above
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 261.00ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    ...  # final execute_once replay frame, identical to the first failure above
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='', alg=BZ2NCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_simmetry(
E           alg=BZ2NCD({}),
E           left='',
E           right='',
E       )
E       Unreliable test timings! On an initial run, this test took 261.00ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 33.66 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
On an initial run, this test took 261.00ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 33.66 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg2] ___________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 self = data = ConjectureData(INTERESTING, 52 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 52 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
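The failure above is a timing artifact on a loaded builder, not a correctness bug in textdistance: the first call of an example blows the 200 ms deadline, the replay is fast, and Hypothesis cannot reproduce the "failure". Hypothesis's own message names the per-test opt-out. A minimal sketch of what that looks like (illustrative only; the real test lives in tests/test_compression/test_common.py and its assertions are omitted here):

    # Illustrative sketch, not the upstream test source: opt one
    # property-based test out of the deadline check so builder timing
    # noise cannot fail it.
    import textdistance
    from hypothesis import given, settings, strategies as st

    @settings(deadline=None)  # the opt-out suggested in the log above
    @given(left=st.text(), right=st.text())
    def test_simmetry(left, right):
        alg = textdistance.BZ2NCD()  # mirrors the BZ2NCD({}) instance above
        alg(left, right)  # exercise the algorithm; real assertions omitted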
___________________________ test_is_normalized[alg2] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 52 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire
        engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 52 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find

        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 52 bytes, frozen)
function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffff67369580>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 52 bytes, frozen)

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
>                       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '\U00045727', right = "Oà£'µ\U00083fcf,𦮙", alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U00045727', "Oà£'µ\U00083fcf,𦮙", BZ2NCD({})), kwargs = {}
initial_draws = 2, start = 4884428.618616794, result = None
finish = 4884429.009556296, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=390940)
current_deadline = datetime.timedelta(microseconds=250000)

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 390.94ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(VALID, 52 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 390.94ms, which exceeds the deadline of 200.00ms'), 'args = (\'\\U00045727\', "Oà£\'µ\\U0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n')
example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find

        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='\U00045727', right="Oà£'µ\U00083fcf,𦮙", alg=BZ2NCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_is_normalized(
E               alg=BZ2NCD({}),
E               left='\U00045727',
E               right="Oà£'µ\U00083fcf,𦮙",
E           )
E           Unreliable test timings! On an initial run, this test took 390.94ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 15.73 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
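The numbers in these tracebacks are internally consistent with the wrapper source shown above: on non-final runs Hypothesis relaxes the configured deadline by 25% before raising DeadlineExceeded. Checking that arithmetic with the values from this log:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)  # settings.deadline in this build
    relaxed = (deadline // 4) * 5                    # the (current_deadline // 4) * 5 step above
    print(relaxed)  # 0:00:00.250000 -- the current_deadline=250000us in the tracebacks

390.94 ms exceeds even the relaxed 250 ms bound, so the first run fails; the final replay finishes in 15.73 ms, well under the raw 200 ms deadline, and that mismatch is what gets reported as Flaky.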
___________________________ test_is_normalized[alg3] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 27 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire
        engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(INTERESTING, 27 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find

        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 27 bytes, frozen)
function = <function StateForActualGivenExecution.execute_once.<locals>.run at 0xffffff672804a0>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 27 bytes, frozen)

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
>                       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

left = '', right = '~\U000b0d02\U00085526', alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '~\U000b0d02\U00085526', RLENCD({'qval': 1})), kwargs = {}
initial_draws = 2, start = 4884433.344223291, result = None
finish = 4884433.599603518, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=255380)
current_deadline = datetime.timedelta(microseconds=250000)

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 255.38ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <hypothesis.core.StateForActualGivenExecution object at 0x...>
data = ConjectureData(VALID, 27 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 255.38ms, which exceeds the deadline of 200.00ms'), "args = ('', '~\\U000b0d02\\U00085526...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find

        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='~\U000b0d02\U00085526', alg=RLENCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_is_normalized(
E               alg=RLENCD({'qval': 1}),
E               left='',
E               right='~\U000b0d02\U00085526',
E           )
E           Unreliable test timings! On an initial run, this test took 255.38ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.75 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
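All three failure blocks above end the same way: a DeadlineExceeded on the first run of an example turns into Flaky when the final replay passes. A condensed toy model of that replay step (a simplification for reading the log, not Hypothesis's real implementation):

    # Toy model of the replay logic in execute_once above -- simplified,
    # not Hypothesis's actual code.
    class DeadlineExceeded(Exception):
        pass

    class Flaky(Exception):
        pass

    def final_replay(runtime_ms, deadline_ms, expected_failure):
        # is_final=True: the 25% margin is gone, but the warmed-up run is fast.
        if runtime_ms >= deadline_ms:
            raise DeadlineExceeded(runtime_ms)
        if expected_failure is not None:
            # The recorded failure did not reproduce -> unreliable test.
            raise Flaky("Falsified on the first call but did not on a subsequent one")

    # Values from the RLENCD failure above: 255.38 ms first run, 0.75 ms replay.
    try:
        final_replay(0.75, 200.0, expected_failure=DeadlineExceeded(255.38))
    except Flaky as err:
        print(err)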
=========================== short test summary info ============================
FAILED tests/test_common.py::test_normalization_range[alg0] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg1] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg1] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg2] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg15] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg16] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg16] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg5] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg6] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg8] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_same[alg9] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_same[alg21] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg21] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg10] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg22] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg23] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg23] - hypothesis.er...
FAILED tests/test_external.py::test_qval[None-JaroWinkler] - hypothesis.error...
FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg4] - ...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg5] - ...
FAILED tests/test_compression/test_common.py::test_simmetry[alg0] - hypothesi...
FAILED tests/test_compression/test_common.py::test_simmetry[alg1] - hypothesi...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg0] - hypo...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6] - ...
FAILED tests/test_external.py::test_qval[1-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_compression/test_common.py::test_simmetry[alg2] - hypothesi...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg2] - hypo...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg3] - hypo...
================== 29 failed, 383 passed in 199.74s (0:03:19) ==================

RPM build errors:
error: Bad exit status from /var/tmp/rpm-tmp.RfbjmR (%check)
    Bad exit status from /var/tmp/rpm-tmp.RfbjmR (%check)
Child return code was: 1
EXCEPTION: [Error('Command failed: \n # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec\n', 1)]
Traceback (most recent call last):
  File "/usr/lib/python3.11/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
             ^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mockbuild/util.py", line 597, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed:
 # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec
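The build therefore fails in %check on timing variance: 29 failures against 383 passes, dominated by the deadline-induced Flaky errors shown above. A common way to make such a test run immune to builder timing noise is a Hypothesis settings profile with the deadline disabled, using the documented register_profile/load_profile recipe; the sketch below uses an arbitrary profile name, and the corresponding spec-file change is not part of this log:

    # conftest.py -- hypothetical addition for the package's test run:
    # register and load a deadline-free profile so property-based tests
    # cannot fail on builder timing noise.
    from hypothesis import settings

    settings.register_profile("packaging", deadline=None)
    settings.load_profile("packaging")

The same profile could instead be selected per invocation with the Hypothesis pytest plugin's --hypothesis-profile option.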