Mock Version: 5.0
Mock Version: 5.0
Mock Version: 5.0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2671486-67414/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1712016000
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.src.rpm
Child return code was: 0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2671486-67414/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1712016000
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.CXrt89
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
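The %prep scriptlet below patches setup.py so that any line mentioning isort is commented out, keeping the linter out of the build's dependency closure. A minimal Python sketch of what that sed substitution does (the sample setup.py line is hypothetical, for illustration only):

    import re

    # Mirrors: sed -r 's/^([[:blank:]]*)(.*\bisort\b)/\1# \2/'
    # Comment out any line that mentions "isort", preserving its indentation.
    line = "    'isort': ['isort'],"  # hypothetical setup.py line
    patched = re.sub(r"^([ \t]*)(.*\bisort\b)", r"\1# \2", line)
    print(patched)  # "    # 'isort': ['isort'],"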
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.ISpqDd
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement not satisfied: setuptools >= 40.8
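The pyproject_buildrequires.py run above is the helper behind the %pyproject_buildrequires macro: `-x test,Jaro,JaroWinkler,Levenshtein` asks it to also emit BuildRequires for those extras, while requirements tied to extras that were not requested are later reported as "Ignoring alien requirement". The `; extra == '...'` markers come from the package's Requires-Dist metadata and can be evaluated with the packaging library; a small sketch, using a requirement string taken from this log:

    from packaging.requirements import Requirement

    req = Requirement("rapidfuzz >=2.6.0 ; extra == 'jaro'")
    # The environment marker is only true when the matching extra is requested.
    print(req.marker.evaluate({"extra": "jaro"}))  # True
    print(req.marker.evaluate({"extra": "lint"}))  # False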
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement not satisfied: setuptools >= 40.8
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2671486-67414/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1712016000
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.IRylrh
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
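The "Child return code was: 11" above is what drives this iteration: rpmbuild -br stops after %generate_buildrequires, writes a *.buildreqs.nosrc.rpm, and exits 11 so that mock knows to install the newly generated BuildRequires and start over, which is why the ENTER/%prep/%generate_buildrequires sequence repeats. A conceptual sketch of that resolution loop (not mock's actual implementation; install_missing is a caller-supplied placeholder):

    import subprocess

    def resolve_buildrequires(spec: str, install_missing) -> None:
        """Re-run 'rpmbuild -br' until it stops requesting BuildRequires."""
        while True:
            rc = subprocess.run(
                ["rpmbuild", "-br", "--noclean", "--nodeps", spec],
            ).returncode
            if rc != 11:  # 11: a new .buildreqs.nosrc.rpm was written
                return
            install_missing()  # e.g. a dnf builddep step against that SRPM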
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.TRAggw
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 69.0.3)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
creating textdistance.egg-info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
writing manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 69.0.3)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2671486-67414/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1712016000
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.JmjhO6
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
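The extras requested in these passes (Jaro, JaroWinkler, Levenshtein and, in the second run, DamerauLevenshtein, common, extra, extras, test) pull in rapidfuzz, Levenshtein, jellyfish, numpy and hypothesis, which the final pass below reports as satisfied. For textdistance these are optional accelerators: when installed, the library can delegate an algorithm to a faster external backend, with a pure-Python fallback otherwise. A short usage sketch, assuming textdistance's documented API (the external= flag toggles the external backends):

    import textdistance

    # Prebuilt instance; may route through an external backend when available.
    print(textdistance.levenshtein("lewenstein", "levenshtein"))  # 2

    # Force the pure-Python implementation for comparison.
    pure = textdistance.Levenshtein(external=False)
    print(pure.distance("lewenstein", "levenshtein"))  # 2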
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.8kulcn
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 69.0.3)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
creating textdistance.egg-info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
writing manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7)
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0)
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7)
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0)
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7)
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0)
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0)
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0)
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS
-specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 69.0.3) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing 
dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'dameraulevenshtein' (installed: jellyfish 0.9.0) Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' (installed: pyxDamerauLevenshtein 1.7.1) Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra 
== 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist 
(textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'common' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'common' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'common' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'common' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'common' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extra' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extra' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extra' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extra' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extra' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extras' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extras' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extras' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extras' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extras' (installed: rapidfuzz 2.13.7) Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 
'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0)
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0)
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
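Return code 11 from this rpmbuild -br run is not a failure: it is the conventional signal that %generate_buildrequires produced a fresh list of BuildRequires, which mock installs into the chroot before re-running the spec with rpmbuild -ba --noprep below. The "Ignoring alien requirement" records above are requirements whose environment marker names an extra that was not requested via -x, while requirements whose marker matches a requested extra show up as "Requirement satisfied" (or become new BuildRequires). A minimal sketch of that marker filtering, using the packaging library; the requested set mirrors the first -x list above, and keep() is a hypothetical helper, not the real pyproject_buildrequires.py code:

    from packaging.requirements import Requirement

    requested = {"test", "jaro", "jarowinkler", "levenshtein"}

    def keep(line: str) -> bool:
        req = Requirement(line)        # parse a PEP 508 requirement string
        if req.marker is None:         # unconditional requirement: always kept
            return True
        # evaluate the marker once per requested extra, matching how the
        # hook output above reports each extra separately
        return any(req.marker.evaluate({"extra": e}) for e in requested)

    print(keep("rapidfuzz >=2.6.0 ; extra == 'jaro'"))  # True  -> BuildRequires
    print(keep("tabulate ; extra == 'benchmark'"))      # False -> alien, ignored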
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2671486-67414/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1712016000
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.9x9q0P
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration
-Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 69.0.3) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein' Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) 
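The setting SOURCE_DATE_EPOCH=1712016000 record at the top of this pass is Fedora's reproducible-builds clamp: the value is derived from the date of the spec's newest %changelog entry, and the build tools use it in place of the wall clock when stamping generated files. Decoding it is a one-liner (a quick check for the reader, not part of the build):

    from datetime import datetime, timezone

    # 1712016000 is the SOURCE_DATE_EPOCH exported above; changelog dates
    # have day granularity, so it decodes to midnight UTC
    print(datetime.fromtimestamp(1712016000, tz=timezone.utc).isoformat())
    # -> 2024-04-02T00:00:00+00:00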
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling 
py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'common' Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'common' Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'common' Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common' Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common' Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extra' Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extra' Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extra' Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra' Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra' Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist 
(textdistance) Ignoring alien requirement: jellyfish ; extra == 'extras' Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extras' Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extras' Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras' Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras' Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3) + cat 
/builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x 
DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 69.0.3) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'dameraulevenshtein' (installed: jellyfish 0.9.0) Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' (installed: pyxDamerauLevenshtein 1.7.1) Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement 
satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated 
metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'common' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'common' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'common' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'common' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'common' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extra' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extra' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extra' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extra' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extra' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extras' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extras' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extras' (installed: Levenshtein 0.21.0) 
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extras' (installed: pyxDamerauLevenshtein 1.7.1)
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extras' (installed: rapidfuzz 2.13.7)
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0)
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0)
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%build):
/bin/sh -e /var/tmp/rpm-tmp.bLU06U + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_wheel.py /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir Processing /builddir/build/BUILD/textdistance-4.6.1 Preparing metadata (pyproject.toml): started Running command Preparing metadata (pyproject.toml) running dist_info creating /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-lj0y5qmc/textdistance.egg-info writing /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-lj0y5qmc/textdistance.egg-info/PKG-INFO writing dependency_links to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-lj0y5qmc/textdistance.egg-info/dependency_links.txt writing requirements to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-lj0y5qmc/textdistance.egg-info/requires.txt 
writing top-level names to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-lj0y5qmc/textdistance.egg-info/top_level.txt writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-lj0y5qmc/textdistance.egg-info/SOURCES.txt' reading manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-lj0y5qmc/textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-lj0y5qmc/textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-lj0y5qmc/textdistance-4.6.1.dist-info' Preparing metadata (pyproject.toml): finished with status 'done' Building wheels for collected packages: textdistance Building wheel for textdistance (pyproject.toml): started Running command Building wheel for textdistance (pyproject.toml) running bdist_wheel running build running build_py creating build creating build/lib creating build/lib/textdistance copying textdistance/benchmark.py -> build/lib/textdistance copying textdistance/libraries.py -> build/lib/textdistance copying textdistance/__init__.py -> build/lib/textdistance copying textdistance/utils.py -> build/lib/textdistance creating build/lib/textdistance/algorithms copying textdistance/algorithms/vector_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/compression_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/simple.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/sequence_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/base.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/edit_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/types.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/__init__.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/phonetic.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/token_based.py -> build/lib/textdistance/algorithms copying textdistance/py.typed -> build/lib/textdistance copying textdistance/libraries.json -> build/lib/textdistance installing to build/bdist.linux-riscv64/wheel running install running install_lib creating build/bdist.linux-riscv64 creating build/bdist.linux-riscv64/wheel creating build/bdist.linux-riscv64/wheel/textdistance creating build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/vector_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/compression_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/simple.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/sequence_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/base.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/edit_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/types.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/__init__.py -> 
build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/phonetic.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/token_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/libraries.json -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/benchmark.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/py.typed -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/libraries.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/__init__.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/utils.py -> build/bdist.linux-riscv64/wheel/textdistance running install_egg_info running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Copying textdistance.egg-info to build/bdist.linux-riscv64/wheel/textdistance-4.6.1-py3.12.egg-info running install_scripts creating build/bdist.linux-riscv64/wheel/textdistance-4.6.1.dist-info/WHEEL creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-wheel-flaswpoz/.tmp-sooihxcr/textdistance-4.6.1-py3-none-any.whl' and adding 'build/bdist.linux-riscv64/wheel' to it adding 'textdistance/__init__.py' adding 'textdistance/benchmark.py' adding 'textdistance/libraries.json' adding 'textdistance/libraries.py' adding 'textdistance/py.typed' adding 'textdistance/utils.py' adding 'textdistance/algorithms/__init__.py' adding 'textdistance/algorithms/base.py' adding 'textdistance/algorithms/compression_based.py' adding 'textdistance/algorithms/edit_based.py' adding 'textdistance/algorithms/phonetic.py' adding 'textdistance/algorithms/sequence_based.py' adding 'textdistance/algorithms/simple.py' adding 'textdistance/algorithms/token_based.py' adding 'textdistance/algorithms/types.py' adding 'textdistance/algorithms/vector_based.py' adding 'textdistance-4.6.1.dist-info/LICENSE' adding 'textdistance-4.6.1.dist-info/METADATA' adding 'textdistance-4.6.1.dist-info/WHEEL' adding 'textdistance-4.6.1.dist-info/top_level.txt' adding 'textdistance-4.6.1.dist-info/RECORD' removing build/bdist.linux-riscv64/wheel Building wheel for textdistance (pyproject.toml): finished with status 'done' Created wheel for textdistance: filename=textdistance-4.6.1-py3-none-any.whl size=31183 sha256=e2a05e7f2d532f18455b4ec22fd6bd68655f12b4bf7a6157ec01a3c1ef18f182 Stored in directory: /builddir/.cache/pip/wheels/af/08/72/d6baf94a0831066222f63a4e4a469ea938a661b7e8974b7b68 Successfully built textdistance + RPM_EC=0 ++ jobs -p + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.rwGaWj + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch '!=' / ']' + rm -rf /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch ++ dirname /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch + mkdir -p /builddir/build/BUILDROOT + mkdir /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall 
-Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 ++ xargs basename --multiple ++ ls /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl ++ sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/' + specifier=textdistance==4.6.1 + '[' -z textdistance==4.6.1 ']' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + /usr/bin/python3 -m pip install --root /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --prefix /usr --no-deps --disable-pip-version-check --progress-bar off --verbose --ignore-installed --no-warn-script-location --no-index --no-cache-dir --find-links /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir textdistance==4.6.1 Using pip 23.3.2 from /usr/lib/python3.12/site-packages/pip (python 3.12) Looking in links: /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir Processing ./pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl Installing collected packages: textdistance Successfully installed textdistance-4.6.1 + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin ']' + rm -f /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo + site_dirs=() + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + site_dirs+=("/usr/lib/python3.12/site-packages") + '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages '!=' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages ']' + for site_dir in 
${site_dirs[@]} + for distinfo in /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch$site_dir/*.dist-info + echo '%ghost /usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info' + sed -i s/pip/rpm/ /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/INSTALLER + PYTHONPATH=/usr/lib/rpm/redhat + /usr/bin/python3 -B /usr/lib/rpm/redhat/pyproject_preprocess_record.py --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --record /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record + rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD' + rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED' ++ wc -l /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo ++ cut -f1 '-d ' + lines=1 + '[' 1 -ne 1 ']' + RPM_PERCENTAGES_COUNT=2 + /usr/bin/python3 /usr/lib/rpm/redhat/pyproject_save_files.py --output-files /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-files --output-modules /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-modules --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --sitelib /usr/lib/python3.12/site-packages --sitearch /usr/lib64/python3.12/site-packages --python-version 3.12 --pyproject-record /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record --prefix /usr -l textdistance + /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 4.6.1-1.fc40 --unique-debug-suffix -4.6.1-1.fc40.noarch --unique-debug-src-base python-textdistance-4.6.1-1.fc40.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/textdistance-4.6.1 find-debuginfo: starting Extracting debug info from 0 files Creating .debug symlinks for symlinks to ELF files find: ‘debug’: No such file or directory find-debuginfo: done + /usr/lib/rpm/check-buildroot + /usr/lib/rpm/redhat/brp-ldconfig + /usr/lib/rpm/brp-compress + /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip + /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip + /usr/lib/rpm/check-rpaths + /usr/lib/rpm/redhat/brp-mangle-shebangs + /usr/lib/rpm/brp-remove-la-files + env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8 Bytecompiling .py files below /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12 using python3.12 + /usr/lib/rpm/redhat/brp-python-hardlink Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.e3J1OY + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong 
-specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 + k='not test_compare[Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein]' + k='not test_compare[Hamming] and not 
test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not 
test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ PATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin
+ PYTHONPATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages:/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages
+ PYTHONDONTWRITEBYTECODE=1
+ PYTEST_ADDOPTS=' --ignore=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir'
+ PYTEST_XDIST_AUTO_NUM_WORKERS=8
+ /usr/bin/pytest -v -k 'not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]' -n auto
============================= test session starts ==============================
platform linux -- Python 3.12.0, pytest-7.4.3, pluggy-1.3.0 -- /usr/bin/python3
cachedir: .pytest_cache
hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/builddir/build/BUILD/textdistance-4.6.1/.hypothesis/examples')
rootdir: /builddir/build/BUILD/textdistance-4.6.1
configfile: setup.cfg
plugins: hypothesis-6.82.0, xdist-3.5.0
created: 8/8 workers
8 workers [412 items]
scheduling tests via LoadScheduling
tests/test_common.py::test_normalization_range[alg0]
tests/test_common.py::test_normalization_same[alg12]
tests/test_common.py::test_normalization_monotonic[alg0]
tests/test_common.py::test_normalization_by_one[alg0]
tests/test_common.py::test_normalization_monotonic[alg12]
tests/test_common.py::test_normalization_same[alg0]
tests/test_common.py::test_normalization_by_one[alg12]
tests/test_common.py::test_normalization_range[alg12]
[gw0] [ 0%] FAILED tests/test_common.py::test_normalization_range[alg0]
tests/test_common.py::test_normalization_range[alg1]
[gw4] [ 0%] FAILED tests/test_common.py::test_normalization_same[alg0]
tests/test_common.py::test_normalization_same[alg1]
[gw2] [ 0%] FAILED tests/test_common.py::test_normalization_by_one[alg0]
tests/test_common.py::test_normalization_by_one[alg1]
[gw5] [ 0%] PASSED
tests/test_common.py::test_normalization_same[alg12] tests/test_common.py::test_normalization_same[alg13] [gw1] [ 1%] FAILED tests/test_common.py::test_normalization_range[alg12] tests/test_common.py::test_normalization_range[alg13] [gw6] [ 1%] PASSED tests/test_common.py::test_normalization_monotonic[alg0] tests/test_common.py::test_normalization_monotonic[alg1] [gw2] [ 1%] FAILED tests/test_common.py::test_normalization_by_one[alg1] tests/test_common.py::test_normalization_by_one[alg2] [gw0] [ 1%] FAILED tests/test_common.py::test_normalization_range[alg1] tests/test_common.py::test_normalization_range[alg2] [gw7] [ 2%] PASSED tests/test_common.py::test_normalization_monotonic[alg12] tests/test_common.py::test_normalization_monotonic[alg13] [gw4] [ 2%] PASSED tests/test_common.py::test_normalization_same[alg1] tests/test_common.py::test_normalization_same[alg2] [gw5] [ 2%] PASSED tests/test_common.py::test_normalization_same[alg13] tests/test_common.py::test_normalization_same[alg14] [gw0] [ 2%] FAILED tests/test_common.py::test_normalization_range[alg2] tests/test_common.py::test_normalization_range[alg3] [gw3] [ 3%] FAILED tests/test_common.py::test_normalization_by_one[alg12] tests/test_common.py::test_normalization_by_one[alg13] [gw1] [ 3%] FAILED tests/test_common.py::test_normalization_range[alg13] tests/test_common.py::test_normalization_range[alg14] [gw2] [ 3%] FAILED tests/test_common.py::test_normalization_by_one[alg2] tests/test_common.py::test_normalization_by_one[alg3] [gw2] [ 3%] FAILED tests/test_common.py::test_normalization_by_one[alg3] tests/test_common.py::test_normalization_by_one[alg4] [gw6] [ 4%] PASSED tests/test_common.py::test_normalization_monotonic[alg1] tests/test_common.py::test_normalization_monotonic[alg2] [gw4] [ 4%] PASSED tests/test_common.py::test_normalization_same[alg2] tests/test_common.py::test_normalization_same[alg3] [gw7] [ 4%] PASSED tests/test_common.py::test_normalization_monotonic[alg13] tests/test_common.py::test_normalization_monotonic[alg14] [gw2] [ 4%] FAILED tests/test_common.py::test_normalization_by_one[alg4] tests/test_common.py::test_normalization_by_one[alg5] [gw1] [ 5%] PASSED tests/test_common.py::test_normalization_range[alg14] tests/test_common.py::test_normalization_range[alg15] [gw2] [ 5%] FAILED tests/test_common.py::test_normalization_by_one[alg5] tests/test_common.py::test_normalization_by_one[alg6] [gw4] [ 5%] FAILED tests/test_common.py::test_normalization_same[alg3] tests/test_common.py::test_normalization_same[alg4] [gw3] [ 5%] FAILED tests/test_common.py::test_normalization_by_one[alg13] [gw5] [ 6%] FAILED tests/test_common.py::test_normalization_same[alg14] tests/test_common.py::test_normalization_by_one[alg14] tests/test_common.py::test_normalization_same[alg15] [gw6] [ 6%] PASSED tests/test_common.py::test_normalization_monotonic[alg2] tests/test_common.py::test_normalization_monotonic[alg3] [gw0] [ 6%] FAILED tests/test_common.py::test_normalization_range[alg3] tests/test_common.py::test_normalization_range[alg4] [gw3] [ 6%] FAILED tests/test_common.py::test_normalization_by_one[alg14] tests/test_common.py::test_normalization_by_one[alg15] [gw1] [ 7%] FAILED tests/test_common.py::test_normalization_range[alg15] tests/test_common.py::test_normalization_range[alg16] [gw3] [ 7%] FAILED tests/test_common.py::test_normalization_by_one[alg15] [gw7] [ 7%] PASSED tests/test_common.py::test_normalization_monotonic[alg14] tests/test_common.py::test_normalization_by_one[alg16] 
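The test_normalization_* cases streaming above and below are property tests run against every algorithm in the library (the alg0..alg23 parametrization): normalized distance and similarity must stay inside [0, 1] and behave consistently with each other. The core invariant, sketched against textdistance's public API (one algorithm shown; the mapping of each assertion to a specific test name is an assumption from the test ids):

    import textdistance

    alg = textdistance.levenshtein  # one algorithm; the suite loops over all of them
    nd = alg.normalized_distance("qwe", "rty")
    ns = alg.normalized_similarity("qwe", "rty")
    assert 0.0 <= nd <= 1.0                 # what test_normalization_range checks
    assert abs(nd + ns - 1.0) < 1e-9        # distance and similarity are complementary
    assert alg.normalized_distance("qwe", "qwe") == 0.0  # identical inputs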
tests/test_common.py::test_normalization_monotonic[alg15] [gw4] [ 7%] PASSED tests/test_common.py::test_normalization_same[alg4] tests/test_common.py::test_normalization_same[alg5] [gw2] [ 8%] FAILED tests/test_common.py::test_normalization_by_one[alg6] tests/test_common.py::test_normalization_by_one[alg7] [gw5] [ 8%] PASSED tests/test_common.py::test_normalization_same[alg15] tests/test_common.py::test_normalization_same[alg16] [gw6] [ 8%] PASSED tests/test_common.py::test_normalization_monotonic[alg3] tests/test_common.py::test_normalization_monotonic[alg4] [gw0] [ 8%] FAILED tests/test_common.py::test_normalization_range[alg4] tests/test_common.py::test_normalization_range[alg5] [gw3] [ 8%] FAILED tests/test_common.py::test_normalization_by_one[alg16] tests/test_common.py::test_normalization_by_one[alg17] [gw1] [ 9%] FAILED tests/test_common.py::test_normalization_range[alg16] tests/test_common.py::test_normalization_range[alg17] [gw2] [ 9%] FAILED tests/test_common.py::test_normalization_by_one[alg7] tests/test_common.py::test_normalization_by_one[alg8] [gw4] [ 9%] PASSED tests/test_common.py::test_normalization_same[alg5] tests/test_common.py::test_normalization_same[alg6] [gw5] [ 9%] PASSED tests/test_common.py::test_normalization_same[alg16] tests/test_common.py::test_normalization_same[alg17] [gw7] [ 10%] PASSED tests/test_common.py::test_normalization_monotonic[alg15] tests/test_common.py::test_normalization_monotonic[alg16] [gw4] [ 10%] FAILED tests/test_common.py::test_normalization_same[alg6] tests/test_common.py::test_normalization_same[alg7] [gw0] [ 10%] PASSED tests/test_common.py::test_normalization_range[alg5] tests/test_common.py::test_normalization_range[alg6] [gw1] [ 10%] PASSED tests/test_common.py::test_normalization_range[alg17] tests/test_common.py::test_normalization_range[alg18] [gw6] [ 11%] PASSED tests/test_common.py::test_normalization_monotonic[alg4] tests/test_common.py::test_normalization_monotonic[alg5] [gw4] [ 11%] FAILED tests/test_common.py::test_normalization_same[alg7] tests/test_common.py::test_normalization_same[alg8] [gw2] [ 11%] FAILED tests/test_common.py::test_normalization_by_one[alg8] tests/test_common.py::test_normalization_by_one[alg9] [gw3] [ 11%] FAILED tests/test_common.py::test_normalization_by_one[alg17] tests/test_common.py::test_normalization_by_one[alg18] [gw0] [ 12%] FAILED tests/test_common.py::test_normalization_range[alg6] tests/test_common.py::test_normalization_range[alg7] [gw5] [ 12%] FAILED tests/test_common.py::test_normalization_same[alg17] tests/test_common.py::test_normalization_same[alg18] [gw3] [ 12%] FAILED tests/test_common.py::test_normalization_by_one[alg18] [gw1] [ 12%] FAILED tests/test_common.py::test_normalization_range[alg18] tests/test_common.py::test_normalization_range[alg19] tests/test_common.py::test_normalization_by_one[alg19] [gw4] [ 13%] FAILED tests/test_common.py::test_normalization_same[alg8] tests/test_common.py::test_normalization_same[alg9] [gw7] [ 13%] PASSED tests/test_common.py::test_normalization_monotonic[alg16] tests/test_common.py::test_normalization_monotonic[alg17] [gw6] [ 13%] PASSED tests/test_common.py::test_normalization_monotonic[alg5] tests/test_common.py::test_normalization_monotonic[alg6] [gw5] [ 13%] PASSED tests/test_common.py::test_normalization_same[alg18] tests/test_common.py::test_normalization_same[alg19] [gw1] [ 14%] PASSED tests/test_common.py::test_normalization_range[alg19] tests/test_common.py::test_normalization_range[alg20] [gw4] [ 14%] FAILED 
tests/test_common.py::test_normalization_same[alg9] tests/test_common.py::test_normalization_same[alg10] [gw2] [ 14%] FAILED tests/test_common.py::test_normalization_by_one[alg9] tests/test_common.py::test_normalization_by_one[alg10] [gw3] [ 14%] PASSED tests/test_common.py::test_normalization_by_one[alg19] tests/test_common.py::test_normalization_by_one[alg20] [gw0] [ 15%] FAILED tests/test_common.py::test_normalization_range[alg7] tests/test_common.py::test_normalization_range[alg8] [gw7] [ 15%] PASSED tests/test_common.py::test_normalization_monotonic[alg17] tests/test_common.py::test_normalization_monotonic[alg18] [gw6] [ 15%] PASSED tests/test_common.py::test_normalization_monotonic[alg6] tests/test_common.py::test_normalization_monotonic[alg7] [gw4] [ 15%] PASSED tests/test_common.py::test_normalization_same[alg10] tests/test_common.py::test_normalization_same[alg11] [gw5] [ 16%] FAILED tests/test_common.py::test_normalization_same[alg19] tests/test_common.py::test_normalization_same[alg20] [gw1] [ 16%] PASSED tests/test_common.py::test_normalization_range[alg20] tests/test_common.py::test_normalization_range[alg21] [gw2] [ 16%] PASSED tests/test_common.py::test_normalization_by_one[alg10] tests/test_common.py::test_normalization_by_one[alg11] [gw2] [ 16%] FAILED tests/test_common.py::test_normalization_by_one[alg11] tests/test_common.py::test_no_common_chars[alg18] [gw2] [ 16%] PASSED tests/test_common.py::test_no_common_chars[alg18] tests/test_common.py::test_no_common_chars[alg19] [gw2] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg19] tests/test_common.py::test_no_common_chars[alg20] [gw2] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg20] tests/test_common.py::test_no_common_chars[alg21] [gw2] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg21] tests/test_common.py::test_no_common_chars[alg22] [gw2] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg22] tests/test_common.py::test_no_common_chars[alg23] [gw2] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg23] tests/test_common.py::test_empty[alg0] [gw2] [ 18%] PASSED tests/test_common.py::test_empty[alg0] tests/test_common.py::test_empty[alg1] [gw2] [ 18%] PASSED tests/test_common.py::test_empty[alg1] tests/test_common.py::test_empty[alg2] [gw2] [ 18%] PASSED tests/test_common.py::test_empty[alg2] tests/test_common.py::test_empty[alg3] [gw2] [ 19%] PASSED tests/test_common.py::test_empty[alg3] tests/test_common.py::test_empty[alg4] [gw2] [ 19%] PASSED tests/test_common.py::test_empty[alg4] tests/test_common.py::test_empty[alg5] [gw2] [ 19%] PASSED tests/test_common.py::test_empty[alg5] tests/test_common.py::test_empty[alg6] [gw0] [ 19%] PASSED tests/test_common.py::test_normalization_range[alg8] [gw2] [ 20%] PASSED tests/test_common.py::test_empty[alg6] [gw7] [ 20%] PASSED tests/test_common.py::test_normalization_monotonic[alg18] tests/test_common.py::test_empty[alg7] [gw2] [ 20%] PASSED tests/test_common.py::test_empty[alg7] tests/test_common.py::test_normalization_monotonic[alg19] tests/test_common.py::test_normalization_range[alg9] tests/test_common.py::test_empty[alg8] [gw2] [ 20%] PASSED tests/test_common.py::test_empty[alg8] tests/test_common.py::test_empty[alg9] [gw2] [ 21%] PASSED tests/test_common.py::test_empty[alg9] tests/test_common.py::test_empty[alg10] [gw2] [ 21%] PASSED tests/test_common.py::test_empty[alg10] tests/test_common.py::test_empty[alg11] [gw2] [ 21%] PASSED tests/test_common.py::test_empty[alg11] tests/test_common.py::test_empty[alg12] [gw3] [ 
21%] PASSED tests/test_common.py::test_normalization_by_one[alg20] [gw2] [ 22%] PASSED tests/test_common.py::test_empty[alg12] tests/test_common.py::test_normalization_by_one[alg21] tests/test_common.py::test_empty[alg13] [gw2] [ 22%] PASSED tests/test_common.py::test_empty[alg13] tests/test_common.py::test_empty[alg14] [gw2] [ 22%] PASSED tests/test_common.py::test_empty[alg14] tests/test_common.py::test_empty[alg15] [gw2] [ 22%] PASSED tests/test_common.py::test_empty[alg15] tests/test_common.py::test_empty[alg16] [gw2] [ 23%] PASSED tests/test_common.py::test_empty[alg16] tests/test_common.py::test_empty[alg17] [gw4] [ 23%] PASSED tests/test_common.py::test_normalization_same[alg11] tests/test_common.py::test_no_common_chars[alg0] [gw2] [ 23%] PASSED tests/test_common.py::test_empty[alg17] tests/test_common.py::test_empty[alg18] [gw2] [ 23%] PASSED tests/test_common.py::test_empty[alg18] tests/test_common.py::test_empty[alg19] [gw2] [ 24%] PASSED tests/test_common.py::test_empty[alg19] [gw4] [ 24%] PASSED tests/test_common.py::test_no_common_chars[alg0] tests/test_common.py::test_empty[alg20] tests/test_common.py::test_no_common_chars[alg1] [gw2] [ 24%] PASSED tests/test_common.py::test_empty[alg20] tests/test_common.py::test_empty[alg21] [gw2] [ 24%] PASSED tests/test_common.py::test_empty[alg21] tests/test_common.py::test_empty[alg22] [gw2] [ 25%] PASSED tests/test_common.py::test_empty[alg22] tests/test_common.py::test_empty[alg23] [gw1] [ 25%] FAILED tests/test_common.py::test_normalization_range[alg21] [gw2] [ 25%] PASSED tests/test_common.py::test_empty[alg23] tests/test_common.py::test_normalization_range[alg22] [gw5] [ 25%] PASSED tests/test_common.py::test_normalization_same[alg20] tests/test_common.py::test_unequal_distance[alg0] [gw2] [ 25%] PASSED tests/test_common.py::test_unequal_distance[alg0] tests/test_common.py::test_unequal_distance[alg1] tests/test_common.py::test_normalization_same[alg21] [gw2] [ 26%] PASSED tests/test_common.py::test_unequal_distance[alg1] tests/test_common.py::test_unequal_distance[alg2] [gw2] [ 26%] PASSED tests/test_common.py::test_unequal_distance[alg2] tests/test_common.py::test_unequal_distance[alg3] [gw2] [ 26%] PASSED tests/test_common.py::test_unequal_distance[alg3] tests/test_common.py::test_unequal_distance[alg4] [gw2] [ 26%] PASSED tests/test_common.py::test_unequal_distance[alg4] tests/test_common.py::test_unequal_distance[alg5] [gw2] [ 27%] PASSED tests/test_common.py::test_unequal_distance[alg5] tests/test_common.py::test_unequal_distance[alg6] [gw2] [ 27%] PASSED tests/test_common.py::test_unequal_distance[alg6] tests/test_common.py::test_unequal_distance[alg7] [gw2] [ 27%] PASSED tests/test_common.py::test_unequal_distance[alg7] tests/test_common.py::test_unequal_distance[alg8] [gw2] [ 27%] PASSED tests/test_common.py::test_unequal_distance[alg8] [gw4] [ 28%] PASSED tests/test_common.py::test_no_common_chars[alg1] tests/test_common.py::test_unequal_distance[alg9] [gw2] [ 28%] PASSED tests/test_common.py::test_unequal_distance[alg9] tests/test_common.py::test_no_common_chars[alg2] tests/test_common.py::test_unequal_distance[alg10] [gw2] [ 28%] PASSED tests/test_common.py::test_unequal_distance[alg10] tests/test_common.py::test_unequal_distance[alg11] [gw4] [ 28%] PASSED tests/test_common.py::test_no_common_chars[alg2] tests/test_common.py::test_no_common_chars[alg3] [gw2] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg11] tests/test_common.py::test_unequal_distance[alg12] [gw6] [ 29%] PASSED 
tests/test_common.py::test_normalization_monotonic[alg7] tests/test_common.py::test_normalization_monotonic[alg8] [gw4] [ 29%] PASSED tests/test_common.py::test_no_common_chars[alg3] tests/test_common.py::test_no_common_chars[alg4] [gw2] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg12] [gw4] [ 30%] PASSED tests/test_common.py::test_no_common_chars[alg4] tests/test_common.py::test_unequal_distance[alg13] tests/test_common.py::test_no_common_chars[alg5] [gw4] [ 30%] PASSED tests/test_common.py::test_no_common_chars[alg5] [gw2] [ 30%] PASSED tests/test_common.py::test_unequal_distance[alg13] tests/test_common.py::test_no_common_chars[alg6] tests/test_common.py::test_unequal_distance[alg14] [gw4] [ 30%] PASSED tests/test_common.py::test_no_common_chars[alg6] [gw2] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg14] tests/test_common.py::test_unequal_distance[alg15] tests/test_common.py::test_no_common_chars[alg7] [gw4] [ 31%] PASSED tests/test_common.py::test_no_common_chars[alg7] [gw2] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg15] tests/test_common.py::test_unequal_distance[alg16] [gw2] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg16] tests/test_common.py::test_unequal_distance[alg17] [gw2] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg17] tests/test_common.py::test_no_common_chars[alg8] [gw4] [ 32%] PASSED tests/test_common.py::test_no_common_chars[alg8] tests/test_common.py::test_unequal_distance[alg18] tests/test_common.py::test_no_common_chars[alg9] [gw2] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg18] tests/test_common.py::test_unequal_distance[alg19] [gw2] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg19] tests/test_common.py::test_unequal_distance[alg20] [gw4] [ 33%] PASSED tests/test_common.py::test_no_common_chars[alg9] [gw2] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg20] tests/test_common.py::test_no_common_chars[alg10] [gw4] [ 33%] PASSED tests/test_common.py::test_no_common_chars[alg10] tests/test_common.py::test_no_common_chars[alg11] [gw4] [ 33%] PASSED tests/test_common.py::test_no_common_chars[alg11] tests/test_common.py::test_unequal_distance[alg21] tests/test_common.py::test_no_common_chars[alg12] [gw4] [ 33%] PASSED tests/test_common.py::test_no_common_chars[alg12] [gw2] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg21] tests/test_common.py::test_unequal_distance[alg22] tests/test_common.py::test_no_common_chars[alg13] [gw4] [ 34%] PASSED tests/test_common.py::test_no_common_chars[alg13] tests/test_common.py::test_no_common_chars[alg14] [gw4] [ 34%] PASSED tests/test_common.py::test_no_common_chars[alg14] [gw2] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg22] tests/test_common.py::test_unequal_distance[alg23] tests/test_common.py::test_no_common_chars[alg15] [gw2] [ 35%] PASSED tests/test_common.py::test_unequal_distance[alg23] [gw4] [ 35%] PASSED tests/test_common.py::test_no_common_chars[alg15] tests/test_external.py::test_compare[Jaro] tests/test_common.py::test_no_common_chars[alg16] [gw4] [ 35%] PASSED tests/test_common.py::test_no_common_chars[alg16] tests/test_common.py::test_no_common_chars[alg17] [gw4] [ 35%] PASSED tests/test_common.py::test_no_common_chars[alg17] tests/test_external.py::test_qval[2-JaroWinkler] [gw0] [ 36%] FAILED tests/test_common.py::test_normalization_range[alg9] tests/test_common.py::test_normalization_range[alg10] [gw3] [ 36%] FAILED 
tests/test_common.py::test_normalization_by_one[alg21] tests/test_common.py::test_normalization_by_one[alg22] [gw4] [ 36%] FAILED tests/test_external.py::test_qval[2-JaroWinkler] tests/test_external.py::test_qval[3-Jaro] [gw1] [ 36%] PASSED tests/test_common.py::test_normalization_range[alg22] tests/test_common.py::test_normalization_range[alg23] [gw7] [ 37%] PASSED tests/test_common.py::test_normalization_monotonic[alg19] tests/test_common.py::test_normalization_monotonic[alg20] [gw0] [ 37%] FAILED tests/test_common.py::test_normalization_range[alg10] tests/test_common.py::test_normalization_range[alg11] [gw3] [ 37%] FAILED tests/test_common.py::test_normalization_by_one[alg22] tests/test_common.py::test_normalization_by_one[alg23] [gw6] [ 37%] PASSED tests/test_common.py::test_normalization_monotonic[alg8] tests/test_common.py::test_normalization_monotonic[alg9] [gw2] [ 38%] PASSED tests/test_external.py::test_compare[Jaro] tests/test_external.py::test_compare[JaroWinkler] [gw5] [ 38%] FAILED tests/test_common.py::test_normalization_same[alg21] tests/test_common.py::test_normalization_same[alg22] [gw0] [ 38%] FAILED tests/test_common.py::test_normalization_range[alg11] tests/test_compression/test_common.py::test_simmetry[alg1] [gw1] [ 38%] FAILED tests/test_common.py::test_normalization_range[alg23] tests/test_compression/test_arith_ncd.py::test_arith_output [gw1] [ 39%] PASSED tests/test_compression/test_arith_ncd.py::test_arith_output tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] [gw1] [ 39%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] [gw1] [ 39%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] [gw1] [ 39%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] [gw1] [ 40%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] tests/test_compression/test_common.py::test_monotonicity[alg0] [gw1] [ 40%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg0] tests/test_compression/test_common.py::test_monotonicity[alg1] [gw4] [ 40%] FAILED tests/test_external.py::test_qval[3-Jaro] tests/test_external.py::test_qval[3-JaroWinkler] [gw1] [ 40%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg1] tests/test_compression/test_common.py::test_monotonicity[alg2] [gw1] [ 41%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg2] tests/test_compression/test_common.py::test_monotonicity[alg3] [gw1] [ 41%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg3] tests/test_compression/test_common.py::test_monotonicity[alg4] [gw1] [ 41%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg4] tests/test_compression/test_common.py::test_monotonicity[alg5] [gw1] [ 41%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg5] tests/test_compression/test_common.py::test_monotonicity[alg6] [gw1] [ 41%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg6] tests/test_compression/test_common.py::test_simmetry[alg0] [gw7] [ 42%] PASSED tests/test_common.py::test_normalization_monotonic[alg20] tests/test_common.py::test_normalization_monotonic[alg21] [gw6] [ 42%] PASSED tests/test_common.py::test_normalization_monotonic[alg9] 
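The test_compression results interleaved here cover the normalized compression distance (NCD) family, where a string metric is derived from how well a compressor squeezes the concatenation of the two inputs relative to each input alone. A small sketch with textdistance's NCD variants (the exact expected values live in the test parameters above, e.g. test_bz2_ncd.py::test_similarity[test-nani-0.16]):

    import textdistance

    # Each NCD variant exposes the usual distance/similarity pair.
    print(textdistance.bz2_ncd.distance("test", "nani"))
    print(textdistance.bz2_ncd.similarity("test", "nani"))
    print(textdistance.sqrt_ncd("test", "nani"))     # square-root NCD (idempotency tested above)
    print(textdistance.entropy_ncd("test", "nani"))  # entropy-based NCD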
tests/test_common.py::test_normalization_monotonic[alg10] [gw5] [ 42%] FAILED tests/test_common.py::test_normalization_same[alg22] tests/test_common.py::test_normalization_same[alg23] [gw0] [ 42%] FAILED tests/test_compression/test_common.py::test_simmetry[alg1] [gw2] [ 43%] PASSED tests/test_external.py::test_compare[JaroWinkler] tests/test_compression/test_common.py::test_simmetry[alg2] tests/test_external.py::test_qval[None-Jaro] [gw3] [ 43%] FAILED tests/test_common.py::test_normalization_by_one[alg23] tests/test_compression/test_common.py::test_is_normalized[alg6] [gw4] [ 43%] PASSED tests/test_external.py::test_qval[3-JaroWinkler] tests/test_external.py::test_list_of_numbers[Jaro] [gw2] [ 43%] FAILED tests/test_external.py::test_qval[None-Jaro] tests/test_external.py::test_qval[None-JaroWinkler] [gw7] [ 44%] PASSED tests/test_common.py::test_normalization_monotonic[alg21] tests/test_common.py::test_normalization_monotonic[alg22] [gw1] [ 44%] FAILED tests/test_compression/test_common.py::test_simmetry[alg0] tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor [gw3] [ 44%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg6] tests/test_compression/test_common.py::test_normalized_by_one[alg0] [gw2] [ 44%] FAILED tests/test_external.py::test_qval[None-JaroWinkler] tests/test_external.py::test_qval[1-Jaro] [gw6] [ 45%] PASSED tests/test_common.py::test_normalization_monotonic[alg10] tests/test_common.py::test_normalization_monotonic[alg11] [gw2] [ 45%] FAILED tests/test_external.py::test_qval[1-Jaro] tests/test_external.py::test_qval[1-JaroWinkler] [gw5] [ 45%] FAILED tests/test_common.py::test_normalization_same[alg23] tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor [gw0] [ 45%] FAILED tests/test_compression/test_common.py::test_simmetry[alg2] tests/test_compression/test_common.py::test_simmetry[alg3] [gw4] [ 46%] PASSED tests/test_external.py::test_list_of_numbers[Jaro] tests/test_external.py::test_list_of_numbers[JaroWinkler] [gw1] [ 46%] FAILED tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor [gw0] [ 46%] FAILED tests/test_compression/test_common.py::test_simmetry[alg3] tests/test_compression/test_common.py::test_simmetry[alg4] [gw7] [ 46%] PASSED tests/test_common.py::test_normalization_monotonic[alg22] tests/test_common.py::test_normalization_monotonic[alg23] [gw3] [ 47%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg0] [gw5] [ 47%] PASSED tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor [gw0] [ 47%] FAILED tests/test_compression/test_common.py::test_simmetry[alg4] tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor tests/test_compression/test_common.py::test_simmetry[alg5] tests/test_compression/test_common.py::test_normalized_by_one[alg1] [gw1] [ 47%] FAILED tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor [gw6] [ 48%] PASSED tests/test_common.py::test_normalization_monotonic[alg11] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3] 
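test_external.py, whose Jaro/JaroWinkler results appear above, cross-checks textdistance against the optional accelerated backends pulled in as build requirements earlier (rapidfuzz, pyxDamerauLevenshtein). Algorithm constructors take an external flag controlling whether such a backend may be used; a minimal sketch of the idea (the flag name follows textdistance's constructors, and equality of the two code paths is exactly what these tests assert):

    import textdistance

    pure = textdistance.Levenshtein(external=False)  # force the pure-Python code path
    auto = textdistance.Levenshtein(external=True)   # may delegate to e.g. rapidfuzz
    assert pure("test", "text") == auto("test", "text") == 1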
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3] [gw6] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1] [gw6] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2] [gw6] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2] [gw6] [ 51%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3] [gw6] [ 52%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3] [gw4] [ 52%] PASSED tests/test_external.py::test_list_of_numbers[JaroWinkler] tests/test_edit/test_editex.py::test_distance[--0] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[--0] tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] tests/test_edit/test_editex.py::test_distance[nelson--12] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[nelson--12] tests/test_edit/test_editex.py::test_distance[-neilsen-14] [gw6] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance[-neilsen-14] tests/test_edit/test_editex.py::test_distance[ab-a-2] [gw6] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance[ab-a-2] [gw4] [ 53%] PASSED tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] 
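The test_damerau_levenshtein cases above distinguish the two classic variants: the restricted form (optimal string alignment) never edits a substring twice, so it can report a larger distance than the unrestricted form; the suite's own parameters show 'ab' -> 'bca' as 3 restricted but 2 unrestricted. A sketch, assuming the restricted flag these tests exercise:

    import textdistance

    osa = textdistance.DamerauLevenshtein(restricted=True)    # optimal string alignment
    full = textdistance.DamerauLevenshtein(restricted=False)  # true Damerau-Levenshtein
    print(osa("ab", "bca"), full("ab", "bca"))  # 3 vs 2, per the test ids above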
tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] [gw4] [ 53%] PASSED tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] tests/test_edit/test_editex.py::test_distance[ab-c-4] [gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance[ab-c-4] tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1] tests/test_compression/test_arith_ncd.py::test_make_probs [gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1] [gw4] [ 54%] PASSED tests/test_compression/test_arith_ncd.py::test_make_probs tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12] tests/test_edit/test_editex.py::test_distance[-MARTHA-12] [gw6] [ 54%] PASSED tests/test_edit/test_editex.py::test_distance[-MARTHA-12] tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5] [gw4] [ 55%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24] [gw2] [ 55%] FAILED tests/test_external.py::test_qval[1-JaroWinkler] [gw6] [ 55%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5] tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1] [gw4] [ 55%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24] [gw6] [ 56%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3] tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] tests/test_external.py::test_qval[2-Jaro] [gw4] [ 56%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3] [gw6] [ 56%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] tests/test_edit/test_hamming.py::test_distance[test-text-1] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4] [gw6] [ 56%] PASSED tests/test_edit/test_hamming.py::test_distance[test-text-1] [gw4] [ 57%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5] tests/test_edit/test_hamming.py::test_distance[test-tset-2] [gw6] [ 57%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tset-2] tests/test_edit/test_hamming.py::test_distance[test-qwe-4] [gw4] [ 57%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5] [gw5] [ 57%] FAILED tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0] [gw6] [ 58%] PASSED tests/test_edit/test_hamming.py::test_distance[test-qwe-4] [gw4] [ 58%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0] tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0] tests/test_edit/test_hamming.py::test_distance[test-testit-2] [gw6] [ 58%] PASSED tests/test_edit/test_hamming.py::test_distance[test-testit-2] [gw4] [ 58%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0] tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111] 
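The expected values encoded in these test ids are the textbook Jaro and Jaro-Winkler similarities; Jaro-Winkler additionally rewards a shared prefix, which is why MARTHA/MARHTA scores higher under it. Mirroring two of the parametrized pairs from the log:

    import textdistance

    print(textdistance.jaro("DWAYNE", "DUANE"))           # ~0.822222222, per the test id
    print(textdistance.jaro_winkler("MARTHA", "MARHTA"))  # ~0.9611111111111111, per the test id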
tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222]
[gw6] [ 58%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84]
[gw4] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222]
tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666]
[gw6] [ 59%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84]
tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332]
[gw6] [ 59%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332]
[gw4] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666]
[gw1] [ 60%] PASSED tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor
tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272]
[gw6] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272]
tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683]
tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor
[gw4] [ 60%] PASSED tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683]
tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665]
tests/test_edit/test_levenshtein.py::test_distance[test-text-1]
[gw6] [ 60%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-text-1]
[gw4] [ 61%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665]
tests/test_edit/test_levenshtein.py::test_distance[test-tset-2]
tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0]
[gw4] [ 61%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0]
[gw6] [ 61%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tset-2]
tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925]
tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4]
[gw6] [ 61%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4]
[gw4] [ 62%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925]
tests/test_edit/test_levenshtein.py::test_distance[test-testit-2]
tests/test_edit/test_matrix.py::test_distance[--1]
[gw4] [ 62%] PASSED tests/test_edit/test_matrix.py::test_distance[--1]
[gw6] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-testit-2]
tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1]
tests/test_edit/test_matrix.py::test_distance[-a-0]
[gw6] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1]
[gw4] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[-a-0]
tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1]
[gw4] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1]
tests/test_edit/test_matrix.py::test_distance[A-C--3]
tests/test_edit/test_levenshtein.py::test_distance[test-tet-1]
[gw6] [ 63%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tet-1]
[gw4] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[A-C--3]
tests/test_edit/test_matrix.py::test_distance[A-A-10]
[gw1] [ 64%] FAILED tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor
tests/test_edit/test_matrix.py::test_distance[G-G-7]
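The Jaro and Jaro-Winkler expectations above are the classic record-linkage examples. A quick sketch of how the module-level instances reproduce the MARTHA/MARHTA pair, assuming (as the expected values in the test IDs suggest) that calling an instance returns the similarity:

    import textdistance

    # Six matching characters and one transposed pair (TH <-> HT) give
    # Jaro similarity 17/18; Winkler's bonus for the shared 'MAR' prefix
    # lifts it to 0.9611...
    assert abs(textdistance.jaro('MARTHA', 'MARHTA') - 0.944444444) < 1e-9
    assert abs(textdistance.jaro_winkler('MARTHA', 'MARHTA') - 0.9611111111111111) < 1e-9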
[gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[A-A-10]
tests/test_compression/test_entropy_ncd.py::test_normalization_range
tests/test_edit/test_matrix.py::test_distance[T-A--4]
[gw4] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[G-G-7]
[gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[T-A--4]
tests/test_edit/test_matrix.py::test_distance[T-C-0]
tests/test_edit/test_mlipns.py::test_distance[--1]
[gw6] [ 65%] PASSED tests/test_edit/test_matrix.py::test_distance[T-C-0]
[gw4] [ 65%] PASSED tests/test_edit/test_mlipns.py::test_distance[--1]
tests/test_edit/test_matrix.py::test_distance[A-G--1]
tests/test_edit/test_mlipns.py::test_distance[a--0]
[gw4] [ 65%] PASSED tests/test_edit/test_mlipns.py::test_distance[a--0]
[gw6] [ 65%] PASSED tests/test_edit/test_matrix.py::test_distance[A-G--1]
[gw0] [ 66%] FAILED tests/test_compression/test_common.py::test_simmetry[alg5]
tests/test_edit/test_mlipns.py::test_distance[-a-0]
tests/test_compression/test_common.py::test_simmetry[alg6]
[gw4] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[-a-0]
tests/test_edit/test_matrix.py::test_distance[C-T-0]
tests/test_edit/test_mlipns.py::test_distance[a-a-1]
[gw4] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[a-a-1]
tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0]
[gw4] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0]
tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1]
[gw6] [ 66%] PASSED tests/test_edit/test_matrix.py::test_distance[C-T-0]
[gw4] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1]
tests/test_edit/test_mlipns.py::test_distance[ab-a-1]
tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1]
[gw4] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1]
[gw6] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[ab-a-1]
tests/test_edit/test_mlipns.py::test_distance[abc-abc-1]
tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16]
[gw6] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abc-1]
tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1]
[gw6] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1]
[gw4] [ 68%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0]
tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1]
[gw4] [ 68%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5]
[gw6] [ 68%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1]
[gw4] [ 69%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5]
tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7]
[gw4] [ 69%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7]
[gw6] [ 69%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26]
tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0]
tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
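The Needleman-Wunsch "ident" cases above match the textbook global-alignment example: GATTACA vs GCATGCU scores 0 under +1 for a match, -1 for a mismatch, and unit gap cost. A sketch under that assumed scheme, with gap_cost and sim_func spelled out explicitly rather than relying on the library's defaults:

    import textdistance

    def sim_ident(a, b):
        # Classic scoring scheme: +1 for a match, -1 for a mismatch.
        return 1 if a == b else -1

    # Global alignment of the textbook pair scores 0 under +1/-1 with
    # unit gap cost, matching the [GATTACA-GCATGCU-0] expectation.
    nw = textdistance.NeedlemanWunsch(gap_cost=1, sim_func=sim_ident, external=False)
    assert nw.similarity('GATTACA', 'GCATGCU') == 0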
[gw6] [ 69%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0]
[gw4] [ 70%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15]
tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1]
tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0]
[gw4] [ 70%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1]
tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0]
[gw4] [ 70%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0]
[gw6] [ 70%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0]
tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873]
[gw6] [ 71%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873]
tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333]
[gw6] [ 71%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333]
tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666]
[gw4] [ 71%] PASSED tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111]
[gw6] [ 71%] PASSED tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666]
tests/test_phonetic/test_editex.py::test_distance[ab-a-2]
tests/test_phonetic/test_editex.py::test_distance[--0]
[gw6] [ 72%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-a-2]
[gw4] [ 72%] PASSED tests/test_phonetic/test_editex.py::test_distance[--0]
[gw7] [ 72%] PASSED tests/test_common.py::test_normalization_monotonic[alg23]
tests/test_phonetic/test_editex.py::test_distance[nelson--12]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4]
[gw7] [ 72%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4]
tests/test_phonetic/test_editex.py::test_distance[ab-c-4]
[gw6] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-c-4]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1]
[gw4] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson--12]
tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2]
[gw7] [ 73%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1]
tests/test_phonetic/test_editex.py::test_distance[-neilsen-14]
[gw6] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2]
[gw7] [ 74%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4]
tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2]
[gw4] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[-neilsen-14]
[gw7] [ 74%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4]
[gw6] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2]
tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2]
[gw7] [ 75%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1]
tests/test_phonetic/test_editex.py::test_distance[niall-neal-1]
[gw7] [ 75%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1]
[gw6] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-neal-1]
[gw4] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1]
[gw7] [ 75%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1]
tests/test_phonetic/test_editex.py::test_distance[neal-niall-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1]
tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2]
[gw7] [ 76%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1]
[gw6] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-niall-1]
[gw4] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2]
tests/test_phonetic/test_editex.py::test_distance[cat-hat-2]
tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3]
[gw6] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3]
[gw4] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[cat-hat-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3]
tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3]
tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2]
[gw7] [ 77%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3]
[gw6] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3]
[gw4] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2]
tests/test_phonetic/test_editex.py::test_local[--0]
tests/test_phonetic/test_editex.py::test_local[-neilsen-14]
tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12]
[gw4] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[-neilsen-14]
[gw6] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[--0]
[gw7] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12]
tests/test_phonetic/test_editex.py::test_local[nelson--12]
[gw6] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson--12]
tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6]
tests/test_phonetic/test_editex.py::test_local[ab-c-2]
[gw6] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-c-2]
tests/test_phonetic/test_editex.py::test_local[ab-a-2]
[gw7] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6]
[gw4] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-a-2]
tests/test_phonetic/test_editex.py::test_local[niall-neal-1]
tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2]
tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2]
[gw7] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-neal-1]
[gw4] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2]
[gw6] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2]
tests/test_phonetic/test_editex.py::test_local[neal-niall-1]
[gw7] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-niall-1]
tests/test_phonetic/test_editex.py::test_local[nihal-niall-2] tests/test_phonetic/test_editex.py::test_local[niall-nihal-2] [gw4] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[nihal-niall-2] tests/test_phonetic/test_editex.py::test_local[neal-nihl-3] [gw6] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-nihal-2] [gw3] [ 81%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg1] [gw7] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-nihl-3] tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-] tests/test_phonetic/test_editex.py::test_local[nihl-neal-3] tests/test_compression/test_common.py::test_normalized_by_one[alg2] [gw4] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[nihl-neal-3] [gw6] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-] tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd] tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet] [gw7] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd] tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest] tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION] [gw6] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest] [gw4] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet] [gw7] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION] tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia] tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa] tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-] [gw4] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia] [gw1] [ 83%] FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503] tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a] [gw1] [ 83%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503] [gw4] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a] tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1] [gw6] [ 84%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa] [gw1] [ 84%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1] tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab] tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-] [gw4] [ 84%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab] [gw7] [ 84%] PASSED 
tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-] [gw6] [ 85%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-] tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc] [gw4] [ 85%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc] tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc] tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet] tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0] [gw6] [ 85%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc] [gw4] [ 85%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0] tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1] tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-] [gw6] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1] [gw4] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-] tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST] tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST] [gw6] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST] [gw4] [ 86%] PASSED 
tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST] tests/test_token/test_bag.py::test_distance[qwe-qwe-0] [gw6] [ 87%] PASSED tests/test_token/test_bag.py::test_distance[qwe-qwe-0] tests/test_token/test_bag.py::test_distance[qwe-erty-3] tests/test_token/test_bag.py::test_distance[qwe-ewq-0] [gw4] [ 87%] PASSED tests/test_token/test_bag.py::test_distance[qwe-erty-3] [gw6] [ 87%] PASSED tests/test_token/test_bag.py::test_distance[qwe-ewq-0] tests/test_token/test_bag.py::test_distance[qwe-rtys-4] [gw7] [ 87%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet] tests/test_token/test_cosine.py::test_distance[test-text-0.75] [gw6] [ 88%] PASSED tests/test_token/test_cosine.py::test_distance[test-text-0.75] [gw4] [ 88%] PASSED tests/test_token/test_bag.py::test_distance[qwe-rtys-4] tests/test_token/test_jaccard.py::test_distance[test-text-0.6] tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-] tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595] [gw7] [ 88%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-] [gw6] [ 88%] PASSED tests/test_token/test_jaccard.py::test_distance[test-text-0.6] tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333] tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625] [gw4] [ 89%] PASSED tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595] [gw6] [ 89%] PASSED tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625] tests/test_token/test_jaccard.py::test_compare_with_tversky [gw7] [ 89%] PASSED tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333] tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805] [gw6] [ 89%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805] tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666] [gw6] [ 90%] PASSED tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666] tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334] [gw6] [ 90%] PASSED tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334] tests/test_token/test_sorensen.py::test_distance[test-text-0.75] [gw6] [ 90%] PASSED tests/test_token/test_sorensen.py::test_distance[test-text-0.75] tests/test_token/test_sorensen.py::test_compare_with_tversky [gw0] [ 90%] FAILED tests/test_compression/test_common.py::test_simmetry[alg6] tests/test_compression/test_common.py::test_is_normalized[alg0] [gw5] [ 91%] FAILED tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor tests/test_compression/test_sqrt_ncd.py::test_normalization_range [gw2] [ 91%] FAILED tests/test_external.py::test_qval[2-Jaro] tests/test_edit/test_hamming.py::test_distance[test-tesst-2] [gw2] [ 91%] 
PASSED tests/test_edit/test_hamming.py::test_distance[test-tesst-2]
tests/test_edit/test_hamming.py::test_distance[test-tet-2]
[gw2] [ 91%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tet-2]
tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334]
[gw2] [ 91%] PASSED tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334]
tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0]
[gw2] [ 92%] PASSED tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0]
tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666]
[gw2] [ 92%] PASSED tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666]
[gw4] [ 92%] FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky
tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334]
[gw2] [ 92%] PASSED tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334]
[gw3] [ 93%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg2]
tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667]
tests/test_compression/test_common.py::test_normalized_by_one[alg3]
tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444]
[gw2] [ 93%] PASSED tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444]
[gw4] [ 93%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667]
[gw1] [ 93%] FAILED tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor
tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab]
[gw1] [ 94%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab]
[gw6] [ 94%] PASSED tests/test_token/test_sorensen.py::test_compare_with_tversky
tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set
[gw5] [ 94%] FAILED tests/test_compression/test_sqrt_ncd.py::test_normalization_range
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1]
[gw3] [ 94%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg3]
tests/test_compression/test_common.py::test_normalized_by_one[alg4]
[gw0] [ 95%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg0]
tests/test_compression/test_common.py::test_is_normalized[alg1]
[gw5] [ 95%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1]
[gw5] [ 95%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4]
[gw5] [ 95%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2]
[gw5] [ 96%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1]
[gw5] [ 96%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1]
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1]
[gw5] [ 96%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1]
[gw7] [ 96%] FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set
tests/test_token/test_overlap.py::test_distance[test-text-0.75]
[gw7] [ 97%] PASSED tests/test_token/test_overlap.py::test_distance[test-text-0.75]
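The token-family expectations threaded through this part of the run (jaccard 0.6; cosine, sorensen, and overlap 0.75 for 'test' vs 'text') all follow from the same multiset overlap. A minimal sketch, with qval and external passed explicitly (an assumption, to force the pure-Python path this builder exercises) and assuming the instances return the similarity the test IDs encode:

    import textdistance

    # As character multisets, 'test' and 'text' share three of four
    # symbols (t, t, e), which fixes all of the overlap scores at once.
    jaccard = textdistance.Jaccard(qval=1, external=False)
    sorensen = textdistance.Sorensen(qval=1, external=False)
    overlap = textdistance.Overlap(qval=1, external=False)

    assert jaccard('test', 'text') == 0.6    # 3 / (4 + 4 - 3)
    assert sorensen('test', 'text') == 0.75  # 2 * 3 / (4 + 4)
    assert overlap('test', 'text') == 0.75   # 3 / min(4, 4)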
[gw6] [ 97%] PASSED tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set
[gw0] [ 97%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg1]
tests/test_compression/test_common.py::test_is_normalized[alg2]
[gw3] [ 97%] PASSED tests/test_compression/test_common.py::test_normalized_by_one[alg4]
tests/test_compression/test_common.py::test_normalized_by_one[alg5]
[gw0] [ 98%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg2]
tests/test_compression/test_common.py::test_is_normalized[alg3]
[gw3] [ 98%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg5]
tests/test_compression/test_common.py::test_normalized_by_one[alg6]
[gw0] [ 98%] PASSED tests/test_compression/test_common.py::test_is_normalized[alg3]
tests/test_compression/test_common.py::test_is_normalized[alg4]
[gw3] [ 98%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6]
tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1]
[gw3] [ 99%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1]
tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0]
[gw3] [ 99%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0]
[gw0] [ 99%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg4]
tests/test_compression/test_common.py::test_is_normalized[alg5]
tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6]
[gw3] [ 99%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6]
[gw0] [100%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg5]
=================================== FAILURES ===================================
________________________ test_normalization_range[alg0] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_common.py:50: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(213109792734099347482524661075811432967) to this test or run pytest with --hypothesis-seed=213109792734099347482524661075811432967 to reproduce this failure.
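The FailedHealthCheck above says example generation, not the algorithm under test, was the bottleneck on this loaded builder. A sketch of the two escape hatches the message itself suggests; the test name and strategies mirror tests/test_common.py, but the decorator stack and elided body are assumptions:

    from hypothesis import HealthCheck, given, seed, settings, strategies as st

    # Pin the seed printed above to replay the failure deterministically
    # (equivalent to running pytest with
    # --hypothesis-seed=213109792734099347482524661075811432967),
    # and silence only the too_slow health check for this one test.
    @seed(213109792734099347482524661075811432967)
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(left=st.text(), right=st.text())
    def test_normalization_range(left, right):
        ...  # body as in tests/test_common.py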
________________________ test_normalization_same[alg0] _________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

text = '', alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', Bag({'qval': 1, 'external': True})), kwargs = {}, initial_draws = 1
start = 11933245.024980376, result = None, finish = 11933245.348792404
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=323812)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 323.81ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = data = ConjectureData(VALID, 1 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 323.81ms, which exceeds the deadline of 200.00ms'), "args = ('', Bag({'qval': 1, 'externa...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent an
        ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or it
        might have placed ``data`` in an unsuccessful state and then swallowed
        the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find
        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")
                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_same(
E               alg=Bag({'qval': 1, 'external': True}),
E               text='',
E           )
E           Unreliable test timings! On an initial run, this test took 323.81ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.54 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
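Each of the Flaky failures that follow repeats this pattern: the first execution blows the 200 ms deadline, the shrunk re-run finishes in a couple of milliseconds, and hypothesis reports the timing as unreliable. The fix the error message itself recommends, sketched for one affected test (decorator placement and the elided body are assumptions):

    from hypothesis import given, settings, strategies as st

    # deadline=None disables the per-example time limit entirely, so one
    # slow first call on a loaded builder can no longer falsify the test.
    @settings(deadline=None)
    @given(text=st.text())
    def test_normalization_same(text):
        ...  # body as in tests/test_common.py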
_______________________ test_normalization_by_one[alg0] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '\x8bÎ𭦸lÇf\U0003ecca;\U000f1a9e\U0006d63eA\x8c'
alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\x8bÎ𭦸lÇf\U0003ecca;\U000f1a9e\U0006d63eA\x8c', Bag({'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933251.229265507, result = None
finish = 11933251.752684852, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=523419)
current_deadline = datetime.timedelta(microseconds=250000)

    ...
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 523.42ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 72 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 523.42ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\x8bÎ𭦸lÇf\\U0003ecca;\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='\x8bÎ𭦸lÇf\U0003ecca;\U000f1a9e\U0006d63eA\x8c', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=Bag({'qval': 1, 'external': True}),
E               left='',
E               right='\x8bÎ𭦸lÇf\U0003ecca;\U000f1a9e\U0006d63eA\x8c',
E           )
E           Unreliable test timings! On an initial run, this test took 523.42ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.81 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_range[alg12] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '\U00055d27³', right = '¥³\U000b5048¡\x00'
alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U00055d27³', '¥³\U000b5048¡\x00', Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933269.853287209, result = None
finish = 11933270.35150715, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=498220)
current_deadline = datetime.timedelta(microseconds=250000)

    ...
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 498.22ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 39 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 498.22ms, which exceeds the deadline of 200.00ms'), "args = ('\\U00055d27³', '¥³\\U000b50...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U00055d27³', right='¥³\U000b5048¡\x00', alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_range(
E               alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}),
E               left='\U00055d27³',
E               right='¥³\U000b5048¡\x00',
E           )
E           Unreliable test timings! On an initial run, this test took 498.22ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.11 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg1] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '(\U000dccc9濎\x07\U0001a7c0f\x94,\U0006d5d7', right = ']𣲶½ÜÖÔ'
alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('(\U000dccc9濎\x07\U0001a7c0f\x94,\U0006d5d7', ']𣲶½ÜÖÔ', Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933282.382665984, result = None
finish = 11933283.009794137, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=627128)
current_deadline = datetime.timedelta(microseconds=250000)

    ...
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 627.13ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 95 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 627.13ms, which exceeds the deadline of 200.00ms'), "args = ('(\\U000dccc9濎\\x07\\U0001a7...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='(\U000dccc9濎\x07\U0001a7c0f\x94,\U0006d5d7', right=']𣲶½ÜÖÔ', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}),
E               left='(\U000dccc9濎\x07\U0001a7c0f\x94,\U0006d5d7',
E               right=']𣲶½ÜÖÔ',
E           )
E           Unreliable test timings! On an initial run, this test took 627.13ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.06 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_range[alg1] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '$J', right = '\x10A'
alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('$J', '\x10A', Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933265.650952445, result = None
finish = 11933275.328483876, internal_draw_time = 0
runtime = datetime.timedelta(seconds=9, microseconds=677531)
current_deadline = datetime.timedelta(microseconds=250000)

    ...
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 9677.53ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 23 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 9677.53ms, which exceeds the deadline of 200.00ms'), "args = ('$J', '\\x10A', Hamming({'q...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='$J', right='\x10A', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_range(
E               alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}),
E               left='$J',
E               right='\x10A',
E           )
E           Unreliable test timings! On an initial run, this test took 9677.53ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.06 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_range[alg2] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '>í', right = 'Ó¶ÓÍê+\x9få'
alg = Levenshtein({'qval': 1, 'test_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('>í', 'Ó¶ÓÍê+\x9få', Levenshtein({'qval': 1, 'test_func': , 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933302.638236722, result = None
finish = 11933302.96105815, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=322821)
current_deadline = datetime.timedelta(microseconds=250000)

    [ ... deadline-wrapper test() source identical to the listing above ... ]
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 322.82ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Levenshtein({'qval': 1, 'test_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = data = ConjectureData(VALID, 52 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 322.82ms, which exceeds the deadline of 200.00ms'), "args = ('>í', 'Ó¶ÓÍê+\\x9få', Levens...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [ ... execute_once() source identical to the listing above ... ]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='>í', right='Ó¶ÓÍê+\x9få', alg=Levenshtein({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_range(
E               alg=Levenshtein({'qval': 1, 'test_func': , 'external': True}),
E               left='>í',
E               right='Ó¶ÓÍê+\x9få',
E           )
E           Unreliable test timings! On an initial run, this test took 322.82ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.90 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg12] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = "\U00079c306û\U000a99d6Wý\U000c32c3'zJ\x9dpÄ\U00048dac", right = '\x13Ìo'
alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ("\U00079c306û\U000a99d6Wý\U000c32c3'zJ\x9dpÄ\U00048dac", '\x13Ìo', Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933289.654737009, result = None
finish = 11933289.978602735, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=323866)
current_deadline = datetime.timedelta(microseconds=250000)

    [ ... deadline-wrapper test() source identical to the listing above ... ]
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 323.87ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = data = ConjectureData(VALID, 107 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 323.87ms, which exceeds the deadline of 200.00ms'), 'args = ("\\U00079c306û\\U000a99d6Wý\\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n')
example_kwargs = None

    [ ... execute_once() source identical to the listing above ... ]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left="\U00079c306û\U000a99d6Wý\U000c32c3'zJ\x9dpÄ\U00048dac", right='\x13Ìo', alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}),
E               left="\U00079c306û\U000a99d6Wý\U000c32c3'zJ\x9dpÄ\U00048dac",
E               right='\x13Ìo',
E           )
E           Unreliable test timings! On an initial run, this test took 323.87ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.65 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_range[alg13] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '\U0008bd1af\U00066ced\x06í\x08\x82', right = 'B¢À¸\U0009f9f4V'
alg = Overlap({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U0008bd1af\U00066ced\x06í\x08\x82', 'B¢À¸\U0009f9f4V', Overlap({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933297.459236676, result = None
finish = 11933297.756139604, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=296903)
current_deadline = datetime.timedelta(microseconds=250000)

    [ ... deadline-wrapper test() source identical to the listing above ... ]
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 296.90ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Overlap({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = data = ConjectureData(VALID, 72 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 296.90ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0008bd1af\\U00066ced\\x0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [ ... execute_once() source identical to the listing above ... ]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U0008bd1af\U00066ced\x06í\x08\x82', right='B¢À¸\U0009f9f4V', alg=Overlap({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_range(
E               alg=Overlap({'qval': 1, 'as_set': False, 'external': True}),
E               left='\U0008bd1af\U00066ced\x06í\x08\x82',
E               right='B¢À¸\U0009f9f4V',
E           )
E           Unreliable test timings! On an initial run, this test took 296.90ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.32 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg2] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '\U000c3fe0', right = '`9¸liy\x99ô\U0004aa99'
alg = Levenshtein({'qval': 1, 'test_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U000c3fe0', '`9¸liy\x99ô\U0004aa99', Levenshtein({'qval': 1, 'test_func': , 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933301.033558384, result = None
finish = 11933307.071144003, internal_draw_time = 0
runtime = datetime.timedelta(seconds=6, microseconds=37586)
current_deadline = datetime.timedelta(microseconds=250000)

    [ ... deadline-wrapper test() source identical to the listing above ... ]
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 6037.59ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Levenshtein({'qval': 1, 'test_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = data = ConjectureData(VALID, 50 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 6037.59ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000c3fe0', '`9¸liy\\x99...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [ ... execute_once() source identical to the listing above ... ]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U000c3fe0', right='`9¸liy\x99ô\U0004aa99', alg=Levenshtein({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=Levenshtein({'qval': 1, 'test_func': , 'external': True}),
E               left='\U000c3fe0',
E               right='`9¸liy\x99ô\U0004aa99',
E           )
E           Unreliable test timings! On an initial run, this test took 6037.59ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.78 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg3] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

alg = DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.17 seconds (0 invalid ones and 1 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_common.py:60: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(152547040998236905521292196056966499065) to this test or run pytest with --hypothesis-seed=152547040998236905521292196056966499065 to reproduce this failure.
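The FailedHealthCheck above prints its own reproduction recipe. Applied to this suite it would look roughly like the following sketch; the real test also takes the parametrized alg argument, and its body is elided here:

import hypothesis
import hypothesis.strategies

@hypothesis.seed(152547040998236905521292196056966499065)
@hypothesis.given(
    left=hypothesis.strategies.text(),
    right=hypothesis.strategies.text(),
)
def test_normalization_by_one(left, right):
    ...  # original assertions from tests/test_common.py

Or, without editing the source, pass the seed on the command line as the message suggests: pytest --hypothesis-seed=152547040998236905521292196056966499065 tests/test_common.py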
_______________________ test_normalization_by_one[alg4] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '\x81jÚ', right = 'ÜP"\\$"𥋐\U000abc79g\x05'
alg = Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\x81jÚ', 'ÜP"\\$"𥋐\U000abc79g\x05', Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933331.230914377, result = None
finish = 11933331.660568114, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=429654)
current_deadline = datetime.timedelta(microseconds=250000)

    [ ... deadline-wrapper test() source identical to the listing above ... ]
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 429.65ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = data = ConjectureData(VALID, 70 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 429.65ms, which exceeds the deadline of 200.00ms'), 'args = (\'\\x81jÚ\', \'ÜP"\\\\$"𥋐\\U...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n')
example_kwargs = None

    [ ... execute_once() source identical to the listing above ... ]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\x81jÚ', right='ÜP"\\$"𥋐\U000abc79g\x05', alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}),
E               left='\x81jÚ',
E               right='ÜP"\\$"𥋐\U000abc79g\x05',
E           )
E           Unreliable test timings! On an initial run, this test took 429.65ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.16 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg5] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = ''
alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('0', '', JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933350.260480512, result = None
finish = 11933350.621021243, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=360541)
current_deadline = datetime.timedelta(microseconds=250000)

    [ ... deadline-wrapper test() source identical to the listing above ... ]
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 360.54ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 360.54ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', JaroWinkler({'qval'...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [ ... execute_once() source identical to the listing above ... ]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='0', right='', alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}),
E               left='0',
E               right='',
E           )
E           Unreliable test timings! On an initial run, this test took 360.54ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.30 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_same[alg3] _________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

text = 'í \x99'
alg = DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('í \x99', DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}))
kwargs = {}, initial_draws = 1, start = 11933340.015093632, result = None
finish = 11933340.35288896, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=337795)
current_deadline = datetime.timedelta(microseconds=250000)

    [ ... deadline-wrapper test() source identical to the listing above ... ]
>                   raise DeadlineExceeded(runtime, self.settings.deadline)
E                   hypothesis.errors.DeadlineExceeded: Test took 337.80ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = data = ConjectureData(VALID, 13 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 337.80ms, which exceeds the deadline of 200.00ms'), "args = ('í \\x99', DamerauLevenshtei...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [ ... execute_once() source identical to the listing above ... ]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='í \x99', alg=DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_same(
E               alg=DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}),
E               text='í \x99',
E           )
E           Unreliable test timings! On an initial run, this test took 337.80ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.78 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
On an initial run, this test took 337.80ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.78 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg13] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = ''
right = '\x110\U000f9de1&<\x8dü\tXÁm\x80T\U0004ab5cÔ\U000f72b37\U0008ac12\x08\U000b799bsV'
alg = Overlap({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\x110\U000f9de1&<\x8dü\tXÁm\x80T\U0004ab5cÔ\U000f72b37\U0008ac12\x08\U000b799bsV', Overlap({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933341.027300818, result = None
finish = 11933341.350354746, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=323054)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(seconds=finish - start - internal_draw_time)
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 323.05ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Overlap({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = data = ConjectureData(VALID, 124 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 323.05ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\x110\\U000f9de1&<\\x8...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent an
        ordinary test failure, or a fatal error, or a control exception. If
        this method returns normally, the test might have passed, or it might
        have placed ``data`` in an unsuccessful state and then swallowed the
        corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find
        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                [... same deadline-checking wrapper as in the frame above ...]
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)
                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")
                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='\x110\U000f9de1&<\x8dü\tXÁm\x80T\U0004ab5cÔ\U000f72b37\U0008ac12\x08\U000b799bsV', alg=Overlap({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_by_one(
E               alg=Overlap({'qval': 1, 'as_set': False, 'external': True}),
E               left='',
E               right='\x110\U000f9de1&<\x8dü\tXÁm\x80T\U0004ab5cÔ\U000f72b37\U0008ac12\x08\U000b799bsV',
E           )
E           Unreliable test timings! On an initial run, this test took 323.05ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.45 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_same[alg14] ________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

text = '|c\x95¡', alg = Cosine({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('|c\x95¡', Cosine({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 1, start = 11933336.223046007, result = None
finish = 11933336.563293936, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=340248)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-checking wrapper identical to the first failure above ...]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 340.25ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 18 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 340.25ms, which exceeds the deadline of 200.00ms'), "args = ('|c\\x95¡', Cosine({'qval': ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None):
        [... execute_once() body identical to the first failure above ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='|c\x95¡', alg=Cosine({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_same(
E           alg=Cosine({'qval': 1, 'as_set': False, 'external': True}),
E           text='|c\x95¡',
E       )
E       Unreliable test timings! On an initial run, this test took 340.25ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.86 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_range[alg3] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '\U0004123e\U0005b366Z|Ê\U000caa92À\U00080ad8', right = 'É'
alg = DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U0004123e\U0005b366Z|Ê\U000caa92À\U00080ad8', 'É', DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}))
kwargs = {}, initial_draws = 2, start = 11933338.309027184, result = None
finish = 11933338.753320023, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=444293)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-checking wrapper identical to the first failure above ...]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 444.29ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 71 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 444.29ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0004123e\\U0005b366Z|Ê\\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None):
        [... execute_once() body identical to the first failure above ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U0004123e\U0005b366Z|Ê\U000caa92À\U00080ad8', right='É', alg=DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}),
E           left='\U0004123e\U0005b366Z|Ê\U000caa92À\U00080ad8',
E           right='É',
E       )
E       Unreliable test timings! On an initial run, this test took 444.29ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.66 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg14] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

alg = Cosine({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.03 seconds (0 invalid ones and 2 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_common.py:60: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(306338665610728857619030882428214239943) to this test or run pytest with --hypothesis-seed=306338665610728857619030882428214239943 to reproduce this failure.
_______________________ test_normalization_range[alg15] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '¿ñ\U000f4ee7ä\U0007d729\U00097159\\\U0005f491'
right = '𩹌\U00033a1b\x17z'
alg = StrCmp95({'long_strings': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('¿ñ\U000f4ee7ä\U0007d729\U00097159\\\U0005f491', '𩹌\U00033a1b\x17z', StrCmp95({'long_strings': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933360.86428332, result = None
finish = 11933361.217578853, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=353296)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-checking wrapper identical to the first failure above ...]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 353.30ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 85 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 353.30ms, which exceeds the deadline of 200.00ms'), "args = ('¿ñ\\U000f4ee7ä\\U0007d729\\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None):
        [... execute_once() body identical to the first failure above ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='¿ñ\U000f4ee7ä\U0007d729\U00097159\\\U0005f491', right='𩹌\U00033a1b\x17z', alg=StrCmp95({'long_strings': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=StrCmp95({'long_strings': False, 'external': True}),
E           left='¿ñ\U000f4ee7ä\U0007d729\U00097159\\\U0005f491',
E           right='𩹌\U00033a1b\x17z',
E       )
E       Unreliable test timings! On an initial run, this test took 353.30ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.40 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg15] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = ''
alg = StrCmp95({'long_strings': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('0', '', StrCmp95({'long_strings': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933370.468952047, result = None
finish = 11933370.962166388, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=493214)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-checking wrapper identical to the first failure above ...]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 493.21ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 6 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 493.21ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', StrCmp95({'long_str...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None):
        [... execute_once() body identical to the first failure above ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='0', right='', alg=StrCmp95({'long_strings': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_by_one(
E           alg=StrCmp95({'long_strings': False, 'external': True}),
E           left='0',
E           right='',
E       )
E       Unreliable test timings! On an initial run, this test took 493.21ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.48 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg6] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '¡}', right = '\x82+'
alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('¡}', '\x82+', MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933368.64625429, result = None
finish = 11933369.202520339, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=556266)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-checking wrapper identical to the first failure above ...]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 556.27ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 17 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 556.27ms, which exceeds the deadline of 200.00ms'), "args = ('¡}', '\\x82+', MLIPNS({'qva...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None):
        [... execute_once() body identical to the first failure above ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='¡}', right='\x82+', alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_by_one(
E           alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}),
E           left='¡}',
E           right='\x82+',
E       )
E       Unreliable test timings! On an initial run, this test took 556.27ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.07 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
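Each Flaky report prints its falsifying example verbatim, e.g. left='0', right='' for StrCmp95 just above. If such an input should be retried on every future run regardless of random generation, Hypothesis can pin it with @example; a sketch under the assumption that the property is the [0, 1] range check (the assertion is illustrative, not the suite's actual code):

    import hypothesis
    from hypothesis import example, strategies as st
    import textdistance

    ALG = textdistance.StrCmp95()  # long_strings=False, as shown in the log

    @hypothesis.given(left=st.text(), right=st.text())
    @example(left='0', right='')  # pin the falsifying input from the log
    def test_normalization_by_one(left, right):
        assert 0 <= ALG.normalized_distance(left, right) <= 1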
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right='f\x00', alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Jaro({'qval': 1, 'long_tolerance': False, 'winklerize': False, 'external': True}), E left='', E right='f\x00', E ) E Unreliable test timings! 
On an initial run, this test took 258.16ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.26 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg16] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = "'pþ\U00091fb0\x8fÜ", right = '7' alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ("'pþ\U00091fb0\x8fÜ", '7', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 11933386.154989092, result = None finish = 11933394.981074952, internal_draw_time = 0 runtime = datetime.timedelta(seconds=8, microseconds=826086) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 8826.09ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 37 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 8826.09ms, which exceeds the deadline of 200.00ms'), 'args = ("\'pþ\\U00091fb0\\x8fÜ", \'...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left="'pþ\U00091fb0\x8fÜ", right='7', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}), E left="'pþ\U00091fb0\x8fÜ", E right='7', E ) E Unreliable test timings! On an initial run, this test took 8826.09ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.08 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg16] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 left = '-Ý\U000f8b1c\x89\U0005895b', right = '\U0001fded' alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('-Ý\U000f8b1c\x89\U0005895b', '\U0001fded', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 11933382.03717254, result = None finish = 11933390.223664941, internal_draw_time = 0 runtime = datetime.timedelta(seconds=8, microseconds=186492) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 8186.49ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 39 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test 
_______________________ test_normalization_range[alg16] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '-Ý\U000f8b1c\x89\U0005895b', right = '\U0001fded'
alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('-Ý\U000f8b1c\x89\U0005895b', '\U0001fded', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933382.03717254, result = None
finish = 11933390.223664941, internal_draw_time = 0
runtime = datetime.timedelta(seconds=8, microseconds=186492)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-checking wrapper identical to the first failure above ...]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 8186.49ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 39 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 8186.49ms, which exceeds the deadline of 200.00ms'), "args = ('-Ý\\U000f8b1c\\x89\\U00058...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None):
        [... execute_once() body identical to the first failure above ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='-Ý\U000f8b1c\x89\U0005895b', right='\U0001fded', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}),
E           left='-Ý\U000f8b1c\x89\U0005895b',
E           right='\U0001fded',
E       )
E       Unreliable test timings! On an initial run, this test took 8186.49ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.30 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
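All of the failing tests come from three property families in tests/test_common.py; the log shows their decorators and line numbers but not their bodies. A hedged reconstruction of what such normalization properties typically assert (the assertions and the two-algorithm ALGS list below are illustrative guesses, not the suite's actual code):

    import hypothesis
    from hypothesis import strategies as st
    import pytest
    import textdistance

    # Illustrative stand-in; the real ALGS parametrizes many algorithms
    # (Overlap, Cosine, DamerauLevenshtein, StrCmp95, MLIPNS, Jaro,
    # MongeElkan, LCSSeq, ...).
    ALGS = [textdistance.Jaro(), textdistance.Cosine(qval=1)]

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.given(left=st.text(), right=st.text())
    def test_normalization_range(left, right, alg):
        # Normalized results must stay within [0, 1].
        assert 0 <= alg.normalized_distance(left, right) <= 1

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.given(left=st.text(), right=st.text())
    def test_normalization_by_one(left, right, alg):
        # Normalized distance and similarity should be complementary.
        dist = alg.normalized_distance(left, right)
        sim = alg.normalized_similarity(left, right)
        assert dist + sim == pytest.approx(1)

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.given(text=st.text())
    def test_normalization_same(text, alg):
        # A string compared with itself has distance 0.
        assert alg.normalized_distance(text, text) == 0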
'g𤪏ñ\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
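Every failure in this run follows the same shape: the first execution of a property blows through Hypothesis's 200 ms deadline, the shrunken example is re-run well within the deadline, and Hypothesis reports Flaky rather than a plain failure. The 250000-microsecond current_deadline visible in the frame locals comes from the grace factor that the wrapper applies on non-final runs; a minimal sketch of that arithmetic, using the same expression as the wrapper source in the first traceback above:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)   # settings.deadline for this run
    current_deadline = (deadline // 4) * 5            # grace factor from hypothesis/core.py
    print(current_deadline)                           # 0:00:00.250000, matching the locals above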
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\x11ú\U00019773ÞU', right='g𤪏ñ\U000f9d1aÆ\U000d48b9#𦃁Ië', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E left='\x11ú\U00019773ÞU', E right='g𤪏ñ\U000f9d1aÆ\U000d48b9#𦃁Ië', E ) E Unreliable test timings! On an initial run, this test took 537.00ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.15 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg6] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 text = '0' alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) kwargs = {}, initial_draws = 1, start = 11933419.814308684, result = None finish = 11933420.156320712, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=342012) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 342.01ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 342.01ms, which exceeds the deadline of 200.00ms'), "args = ('0', MLIPNS({'qval': 1, 'thr...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='0', alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}), E text='0', E ) E Unreliable test timings! On an initial run, this test took 342.01ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.38 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg7] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 text = '0' alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 1, start = 11933434.839457572, result = None finish = 11933435.563606037, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=724148) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 724.15ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 724.15ms, which exceeds the deadline of 200.00ms'), "args = ('0', LCSSeq({'qval': 1, 'tes...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
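Hypothesis's own advice in these reports is to set deadline=None. Suite-wide, the conventional way to do that is a settings profile; a minimal sketch, assuming a hypothetical conftest.py next to tests/test_common.py (register_profile and load_profile are standard Hypothesis settings APIs; the profile name is arbitrary and not something this log shows):

    # conftest.py (hypothetical): wall-clock deadlines are unreliable on a
    # loaded builder, so disable them for every property test in the suite.
    import hypothesis

    hypothesis.settings.register_profile("mock-build", deadline=None)
    hypothesis.settings.load_profile("mock-build")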
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='0', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E text='0', E ) E Unreliable test timings! On an initial run, this test took 724.15ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.96 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg8] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 left = '', right = '0¨\U000b74c7+', alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '0¨\U000b74c7+', LCSStr({'qval': 1, 'external': True})), kwargs = {} initial_draws = 2, start = 11933425.279970752, result = None finish = 11933425.552897776, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=272927) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 272.93ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 26 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 272.93ms, which exceeds the deadline of 200.00ms'), "args = ('', '0¨\\U000b74c7+', LCSStr...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='0¨\U000b74c7+', alg=LCSStr({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=LCSStr({'qval': 1, 'external': True}), E left='', E right='0¨\U000b74c7+', E ) E Unreliable test timings! On an initial run, this test took 272.93ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.28 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg17] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = '', right = '\U000d3112', alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '\U000d3112', MRA({'qval': 1, 'external': True})), kwargs = {} initial_draws = 2, start = 11933446.147649543, result = None finish = 11933446.55692498, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=409275) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 409.27ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 7 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 409.27ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\U000d3112', MRA({'qva...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
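The deadline can also be lifted per test rather than suite-wide. A sketch of what the decorator stack shown in these tracebacks would look like with an added @settings line; ALGS and the test body are placeholders standing in for the real definitions in tests/test_common.py:

    import hypothesis
    import hypothesis.strategies
    import pytest

    ALGS = []  # placeholder; the real algorithm list lives in tests/test_common.py

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(deadline=None)  # exempt only this property test from the deadline
    @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
    def test_normalization_by_one(left, right, alg):
        ...  # placeholder for the real normalization assertions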
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='\U000d3112', alg=MRA({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=MRA({'qval': 1, 'external': True}), E left='', E right='\U000d3112', E ) E Unreliable test timings! On an initial run, this test took 409.27ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.62 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg6] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = '\U0005c81c\x81ä', right = '#' alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0005c81c\x81ä', '#', MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) kwargs = {}, initial_draws = 2, start = 11933447.835330788, result = None finish = 11933448.152795617, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=317465) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 317.46ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 23 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 317.46ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0005c81c\\x81ä', '#', ML...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U0005c81c\x81ä', right='#', alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}), E left='\U0005c81c\x81ä', E right='#', E ) E Unreliable test timings! On an initial run, this test took 317.46ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.56 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg17] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 text = '𮍛', alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('𮍛', MRA({'qval': 1, 'external': True})), kwargs = {}, initial_draws = 1 start = 11933449.460093727, result = None, finish = 11933449.962934872 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=502841) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 502.84ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 9 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 502.84ms, which exceeds the deadline of 200.00ms'), "args = ('𮍛', MRA({'qval': 1, 'extern...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='𮍛', alg=MRA({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=MRA({'qval': 1, 'external': True}), E text='𮍛', E ) E Unreliable test timings! 
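Each "Falsifying example" block above can be replayed deterministically on future runs by pinning it with @example, independent of the random generation phase. A sketch using the input just reported for test_normalization_same; the parametrization and real assertions are omitted as placeholders:

    import hypothesis
    import hypothesis.strategies

    @hypothesis.given(text=hypothesis.strategies.text())
    @hypothesis.example(text='𮍛')  # the falsifying input from the report above
    def test_normalization_same(text):
        ...  # placeholder for the real assertions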
_______________________ test_normalization_by_one[alg18] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3
left = 'Ā', right = ''
alg = Prefix({'qval': 1, 'sim_test': })
@pytest.mark.parametrize('alg', ALGS)
> @hypothesis.given(left=hypothesis.strategies.text(), right=hypothesis.strategies.text())
tests/test_common.py:60
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('Ā', '', Prefix({'qval': 1, 'sim_test': }))
kwargs = {}, initial_draws = 2, start = 11933462.074951213, result = None
finish = 11933462.555896051, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=480945)
current_deadline = datetime.timedelta(microseconds=250000)
[timing-wrapper source elided: identical to the first traceback above]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 480.94ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
The above exception was the direct cause of the following exception:
[execute_once() source and frame locals elided: identical to the first traceback above; data = ConjectureData(VALID, 7 bytes, frozen)]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='Ā', right='', alg=Prefix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_by_one(
E       alg=Prefix({'qval': 1, 'sim_test': }),
E       left='Ā',
E       right='',
E   )
E   Unreliable test timings! On an initial run, this test took 480.94ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.42 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_range[alg18] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3
left = '\U0010013d\U0010013d\\"H|', right = '\U0010013d<\\"H|'
alg = Prefix({'qval': 1, 'sim_test': })
@pytest.mark.parametrize('alg', ALGS)
> @hypothesis.given(left=hypothesis.strategies.text(), right=hypothesis.strategies.text())
tests/test_common.py:50
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('\U0010013d\U0010013d\\"H|', '\U0010013d<\\"H|', Prefix({'qval': 1, 'sim_test': }))
kwargs = {}, initial_draws = 2, start = 11933451.248444183, result = None
finish = 11933451.658594016, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=410150)
current_deadline = datetime.timedelta(microseconds=250000)
[timing-wrapper source elided: identical to the first traceback above]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 410.15ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
The above exception was the direct cause of the following exception:
[execute_once() source and frame locals elided: identical to the first traceback above; data = ConjectureData(VALID, 62 bytes, frozen)]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U0010013d\U0010013d\\"H|', right='\U0010013d<\\"H|', alg=Prefix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=Prefix({'qval': 1, 'sim_test': }),
E       left='\U0010013d\U0010013d\\"H|',
E       right='\U0010013d<\\"H|',
E   )
E   Unreliable test timings! On an initial run, this test took 410.15ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_same[alg8] _________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3
text = 'µ{ä.R3çÝ\x10ø\x0c'
alg = LCSStr({'qval': 1, 'external': True})
@pytest.mark.parametrize('alg', ALGS)
> @hypothesis.given(text=hypothesis.strategies.text())
tests/test_common.py:71
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('µ{ä.R3çÝ\x10ø\x0c', LCSStr({'qval': 1, 'external': True})), kwargs = {}
initial_draws = 1, start = 11933459.866328822, result = None
finish = 11933460.957686316, internal_draw_time = 0
runtime = datetime.timedelta(seconds=1, microseconds=91357)
current_deadline = datetime.timedelta(microseconds=250000)
[timing-wrapper source elided: identical to the first traceback above]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 1091.36ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
The above exception was the direct cause of the following exception:
[execute_once() source and frame locals elided: identical to the first traceback above; data = ConjectureData(VALID, 55 bytes, frozen)]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='µ{ä.R3çÝ\x10ø\x0c', alg=LCSStr({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_same(
E       alg=LCSStr({'qval': 1, 'external': True}),
E       text='µ{ä.R3çÝ\x10ø\x0c',
E   )
E   Unreliable test timings! On an initial run, this test took 1091.36ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.07 ms, which did not.
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg9] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 text = 'V', alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('V', RatcliffObershelp({'qval': 1, 'external': True})), kwargs = {} initial_draws = 1, start = 11933490.864457084, result = None finish = 11933491.154382808, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=289926) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 289.93ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 7 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 289.93ms, which exceeds the deadline of 200.00ms'), "args = ('V', RatcliffObershelp({'qva...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
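(Why the locals above show current_deadline = datetime.timedelta(microseconds=250000) against a configured 200ms deadline: for non-final runs, the wrapper shown in the traceback grants 25% slack before raising DeadlineExceeded. A worked fragment using only names copied from the traceback:)

    from datetime import timedelta

    deadline = timedelta(milliseconds=200)   # settings.deadline, per the log
    current_deadline = (deadline // 4) * 5   # non-final runs are judged against 1.25x the deadline
    assert current_deadline == timedelta(microseconds=250000)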
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='V', alg=RatcliffObershelp({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=RatcliffObershelp({'qval': 1, 'external': True}), E text='V', E ) E Unreliable test timings! On an initial run, this test took 289.93ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.35 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg9] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 left = 'Þ¬', right = 'cÙ\U0009d6f4æ\U00080dadLß' alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('Þ¬', 'cÙ\U0009d6f4æ\U00080dadLß', RatcliffObershelp({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 11933484.476367535, result = None finish = 11933484.955536576, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=479169) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 479.17ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 51 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 479.17ms, which exceeds the deadline of 200.00ms'), "args = ('Þ¬', 'cÙ\\U0009d6f4æ\\U0008...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='Þ¬', right='cÙ\U0009d6f4æ\U00080dadLß', alg=RatcliffObershelp({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=RatcliffObershelp({'qval': 1, 'external': True}), E left='Þ¬', E right='cÙ\U0009d6f4æ\U00080dadLß', E ) E Unreliable test timings! 
On an initial run, this test took 479.17ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.13 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg7] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = '\x98)\x18¸å', right = '\x14_;f\U000dd834D\U0007ff59¦82' alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x98)\x18¸å', '\x14_;f\U000dd834D\U0007ff59¦82', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 11933502.47764518, result = None finish = 11933502.961742122, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=484097) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 484.10ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 81 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 484.10ms, which exceeds the deadline of 200.00ms'), "args = ('\\x98)\\x18¸å', '\\x14_;f\\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\x98)\x18¸å', right='\x14_;f\U000dd834D\U0007ff59¦82', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E left='\x98)\x18¸å', E right='\x14_;f\U000dd834D\U0007ff59¦82', E ) E Unreliable test timings! 
On an initial run, this test took 484.10ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.07 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg19] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 text = '\U000fdc81Ñ\U000fdc81' alg = Postfix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000fdc81Ñ\U000fdc81', Postfix({'qval': 1, 'sim_test': })) kwargs = {}, initial_draws = 1, start = 11933519.71776886, result = None finish = 11933520.325798212, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=608029) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 608.03ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Postfix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 16 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 608.03ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000fdc81Ñ\\U000fdc81', P...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
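(The same DeadlineExceeded-then-Flaky pattern keeps recurring across workers gw0-gw5, which points at a loaded builder rather than the algorithms under test. One common mitigation, an assumption on our part and not something this spec file is shown doing, is a Hypothesis settings profile for build environments, e.g. in the test suite's conftest.py; "ci" is a hypothetical profile name:)

    from hypothesis import HealthCheck, settings

    settings.register_profile(
        "ci",
        deadline=None,                                 # no per-example deadline on slow builders
        suppress_health_check=[HealthCheck.too_slow],  # tolerate slow data generation as well
    )
    settings.load_profile("ci")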
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U000fdc81Ñ\U000fdc81', alg=Postfix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Postfix({'qval': 1, 'sim_test': }), E text='\U000fdc81Ñ\U000fdc81', E ) E Unreliable test timings! On an initial run, this test took 608.03ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.12 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg11] _______________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.16 seconds (0 invalid ones and 1 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test_common.py:60: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(225246066735336296135519425344114537816) to this test or run pytest with --hypothesis-seed=225246066735336296135519425344114537816 to reproduce this failure. _______________________ test_normalization_range[alg21] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 left = 'ÂS\x88t`', right = 'ë°Í' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('ÂS\x88t`', 'ë°Í', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 11933542.07424018, result = None finish = 11933542.556819422, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=482579) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 482.58ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 36 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 482.58ms, which exceeds the deadline of 200.00ms'), "args = ('ÂS\\x88t`', 'ë°Í', Needlema...hich exceeds the deadline of 
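(The health-check failure above prints everything needed to replay it. A sketch of both suggestions; the seed value is the one from the log, while the test shown is a stand-in for tests/test_common.py::test_normalization_by_one:)

    import hypothesis
    from hypothesis import HealthCheck, seed, settings
    from hypothesis import strategies

    @seed(225246066735336296135519425344114537816)           # replay the exact failing data
    @settings(suppress_health_check=[HealthCheck.too_slow])  # silence only this health check
    @hypothesis.given(left=strategies.text(), right=strategies.text())
    def test_normalization_by_one(left, right):
        ...  # illustrative placeholder body

(Equivalently, without editing the source: pytest --hypothesis-seed=225246066735336296135519425344114537816)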
_______________________ test_normalization_range[alg21] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3
left = 'ÂS\x88t`', right = 'ë°Í'
alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})
    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('ÂS\x88t`', 'ë°Í', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933542.07424018, result = None
finish = 11933542.556819422, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=482579)
current_deadline = datetime.timedelta(microseconds=250000)
[... inner test() deadline-wrapper source elided; identical in every failure ...]
E   hypothesis.errors.DeadlineExceeded: Test took 482.58ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
self = data = ConjectureData(VALID, 36 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 482.58ms, which exceeds the deadline of 200.00ms'), "args = ('ÂS\\x88t`', 'ë°Í', Needlema...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None
[... execute_once() source elided; identical in every failure ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='ÂS\x88t`', right='ë°Í', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}),
E       left='ÂS\x88t`',
E       right='ë°Í',
E   )
E   Unreliable test timings! On an initial run, this test took 482.58ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.14 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_range[alg9] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3
left = '\xa0', right = ''
alg = RatcliffObershelp({'qval': 1, 'external': True})
    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('\xa0', '', RatcliffObershelp({'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933557.619447015, result = None
finish = 11933557.955524044, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=336077)
current_deadline = datetime.timedelta(microseconds=250000)
[... inner test() deadline-wrapper source elided; identical in every failure ...]
E   hypothesis.errors.DeadlineExceeded: Test took 336.08ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 336.08ms, which exceeds the deadline of 200.00ms'), "args = ('\\xa0', '', RatcliffObershe...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None
[... execute_once() source elided; identical in every failure ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\xa0', right='', alg=RatcliffObershelp({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=RatcliffObershelp({'qval': 1, 'external': True}),
E       left='\xa0',
E       right='',
E   )
E   Unreliable test timings! On an initial run, this test took 336.08ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.98 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg21] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3
left = 'þBÕ+\x92Â', right = 'þBÕ+\x92Â'
alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})
    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('þBÕ+\x92Â', 'þBÕ+\x92Â', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}))
kwargs = {}, initial_draws = 2, start = 11933559.42428297, result = None
finish = 11933559.972245716, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=547963)
current_deadline = datetime.timedelta(microseconds=250000)
[... inner test() deadline-wrapper source elided; identical in every failure ...]
E   hypothesis.errors.DeadlineExceeded: Test took 547.96ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
self = data = ConjectureData(VALID, 58 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 547.96ms, which exceeds the deadline of 200.00ms'), "args = ('þBÕ+\\x92Â', 'þBÕ+\\x92Â', ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None
[... execute_once() source elided; identical in every failure ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='þBÕ+\x92Â', right='þBÕ+\x92Â', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_by_one(
E       alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}),
E       left='þBÕ+\x92Â',
E       right='þBÕ+\x92Â',
E   )
E   Unreliable test timings! On an initial run, this test took 547.96ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 7.21 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_qval[2-JaroWinkler] ___________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3
left = 'M', right = '0', alg = 'JaroWinkler', qval = 2
    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())
tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('M', '0', 'JaroWinkler', 2), kwargs = {}, initial_draws = 2
start = 11933575.887229284, result = None, finish = 11933576.154997407
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=267768)
current_deadline = datetime.timedelta(microseconds=250000)
[... inner test() deadline-wrapper source elided; identical in every failure ...]
E   hypothesis.errors.DeadlineExceeded: Test took 267.77ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
self = data = ConjectureData(VALID, 8 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 267.77ms, which exceeds the deadline of 200.00ms'), "args = ('M', '0', 'JaroWinkler', 2),...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None
[... execute_once() source elided; identical in every failure ...]
E   hypothesis.errors.Flaky: Hypothesis test_qval(left='M', right='0', alg='JaroWinkler', qval=2) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_qval(
E       alg='JaroWinkler',
E       qval=2,
E       left='M',
E       right='0',
E   )
E   Unreliable test timings! On an initial run, this test took 267.77ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.27 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right='99ôx', alg=Jaccard({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Jaccard({'qval': 1, 'as_set': False, 'external': True}), E left='', E right='99ôx', E ) E Unreliable test timings! 
On an initial run, this test took 286.49ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.84 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg22] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = 'QV¥\x00\x9b¡𱵞', right = 'ÖU\x061@\U000e3a2cÜ' alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('QV¥\x00\x9b¡𱵞', 'ÖU\x061@\U000e3a2cÜ', SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 11933589.044535512, result = None finish = 11933589.296657637, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=252122) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 252.12ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 78 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 252.12ms, which exceeds the deadline of 200.00ms'), "args = ('QV¥\\x00\\x9b¡𱵞', 'ÖU\\x061...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='QV¥\x00\x9b¡𱵞', right='ÖU\x061@\U000e3a2cÜ', alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='QV¥\x00\x9b¡𱵞', E right='ÖU\x061@\U000e3a2cÜ', E ) E Unreliable test timings! 
On an initial run, this test took 252.12ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.41 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg21] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 text = '\U00050f014g\U000ce5cb\x8b\U0008990aÀ\U000cd858\U0004084f5A\U0003d736\U000a04d9>' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U00050f014g\U000ce5cb\x8b\U0008990aÀ\U000cd858\U0004084f5A\U0003d736\U000a04d9>', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 1, start = 11933597.090268103, result = None finish = 11933597.371254029, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=280986) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 280.99ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 83 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 280.99ms, which exceeds the deadline of 200.00ms'), "args = ('\\U00050f014g\\U000ce5cb\\x...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U00050f014g\U000ce5cb\x8b\U0008990aÀ\U000cd858\U0004084f5A\U0003d736\U000a04d9>', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E text='\U00050f014g\U000ce5cb\x8b\U0008990aÀ\U000cd858\U0004084f5A\U0003d736\U000a04d9>', E ) E Unreliable test timings! On an initial run, this test took 280.99ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 19.90 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg11] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('ó', '\U000b00cdå𒄺\U000984ca', Sorensen({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 11933618.094423708, result = None finish = 11933618.36077433, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=266351) current_deadline = timedelta(milliseconds=200) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 266.35ms, which exceeds the deadline of 200.00ms E Falsifying example: test_normalization_range( E alg=Sorensen({'qval': 1, 'as_set': False, 'external': True}), E left='ó', E right='\U000b00cdå𒄺\U000984ca', E ) /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded _______________________ test_normalization_range[alg23] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 left = '«Hn\x8a\U00088133~\U0003e94c', right = '«Hn\x8a\U00088133~\U0003e94c' alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'G', 'S', 'Y', 'X', 'P', 'M', 'U', 'T', 'C', 'B', 'N', 'D', 'V', 'Z', 'Q', 'E', 'J', 'R', 'F', 'A', 'I', 'K', 'O', 'L'})}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('«Hn\x8a\U00088133~\U0003e94c', '«Hn\x8a\U00088133~\U0003e94c', Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_c... 
'S', 'Y', 'X', 'P', 'M', 'U', 'T', 'C', 'B', 'N', 'D', 'V', 'Z', 'Q', 'E', 'J', 'R', 'F', 'A', 'I', 'K', 'O', 'L'})})) kwargs = {}, initial_draws = 2, start = 11933601.878478816, result = None finish = 11933602.318180852, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=439702) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 439.70ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'G', 'S', 'Y', 'X', 'P', 'M', 'U', 'T', 'C', 'B', 'N', 'D', 'V', 'Z', 'Q', 'E', 'J', 'R', 'F', 'A', 'I', 'K', 'O', 'L'})}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 86 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 439.70ms, which exceeds the deadline of 200.00ms'), "args = ('«Hn\\x8a\\U00088133~\\U0003...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. 
with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='«Hn\x8a\U00088133~\U0003e94c', right='«Hn\x8a\U00088133~\U0003e94c', alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'G', 'S', 'Y', 'X', 'P', 'M', 'U', 'T', 'C', 'B', 'N', 'D', 'V', 'Z', 'Q', 'E', 'J', 'R', 'F', 'A', 'I', 'K', 'O', 'L'})})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'G', 'S', 'Y', 'X', 'P', 'M', 'U', 'T', 'C', 'B', 'N', 'D', 'V', 'Z', 'Q', 'E', 'J', 'R', 'F', 'A', 'I', 'K', 'O', 'L'})}), E left='«Hn\x8a\U00088133~\U0003e94c', E right='«Hn\x8a\U00088133~\U0003e94c', E ) E Unreliable test timings! On an initial run, this test took 439.70ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.41 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
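Note on the Hypothesis internals dumped repeatedly above: the runtime it reports subtracts any time spent drawing inputs during the call (internal_draw_time, which is 0 in every failure here), and non-final runs are checked against an inflated deadline, (current_deadline // 4) * 5, i.e. 25% above the configured 200ms. That is why the locals show current_deadline = datetime.timedelta(microseconds=250000) while the error text still cites 200.00ms. A minimal, self-contained sketch of both steps, using only values visible in the traces (the sleep stands in for a test body; nothing below is taken from the build itself):

import datetime
import time

DEADLINE = datetime.timedelta(milliseconds=200)  # the configured deadline
draw_times = []                                  # per-draw costs; 0 in the traces above
initial_draws = len(draw_times)

start = time.perf_counter()
time.sleep(0.05)                                 # stand-in for the property body
finish = time.perf_counter()

# Runtime excludes time Hypothesis spent generating data during the call.
internal_draw_time = sum(draw_times[initial_draws:])
runtime = datetime.timedelta(seconds=finish - start - internal_draw_time)

# Non-final runs get a 25% grace margin: (d // 4) * 5 == 1.25 * d.
current_deadline = (DEADLINE // 4) * 5
assert current_deadline == datetime.timedelta(microseconds=250000)

# DeadlineExceeded is raised when runtime >= current_deadline.
print(runtime, runtime >= current_deadline)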
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ______________________________ test_qval[3-Jaro] _______________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 left = '\U0006104e舆VÂ\x80\x83', right = '\U0007574e\x81VÂ\x80\x83', alg = 'Jaro' qval = 3 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0006104e舆VÂ\x80\x83', '\U0007574e\x81VÂ\x80\x83', 'Jaro', 3) kwargs = {}, initial_draws = 2, start = 11933611.866436971, result = None finish = 11933612.1917631, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=325326) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 325.33ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'Jaro', qval = 3 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 56 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 325.33ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0006104e舆VÂ\\x80\\x83', ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. 
with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_qval(left='\U0006104e舆VÂ\x80\x83', right='\U0007574e\x81VÂ\x80\x83', alg='Jaro', qval=3) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_qval( E alg='Jaro', E qval=3, E left='\U0006104e舆VÂ\x80\x83', E right='\U0007574e\x81VÂ\x80\x83', E ) E Unreliable test timings! On an initial run, this test took 325.33ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.35 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
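The remedy suggested at the end of each Flaky report, turning the deadline off, can be applied per-test or suite-wide. A hypothetical sketch of both options (the test name and property below are placeholders, not taken from textdistance's test suite):

import hypothesis
from hypothesis import strategies as st

# Option 1: per-test, via the settings decorator.
@hypothesis.settings(deadline=None)
@hypothesis.given(left=st.text(), right=st.text())
def test_example(left, right):
    assert isinstance(left + right, str)  # placeholder property

# Option 2: suite-wide, via a settings profile (e.g. in conftest.py),
# which is common for distro builds running on loaded builders.
hypothesis.settings.register_profile("packaging", deadline=None)
hypothesis.settings.load_profile("packaging")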
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg22] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 text = '툀\x800\U00098bd1\U000667cfã$' alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('툀\x800\U00098bd1\U000667cfã$', SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 1, start = 11933634.678177131, result = None finish = 11933635.166517071, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=488340) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 488.34ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 53 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 488.34ms, which exceeds the deadline of 200.00ms'), "args = ('툀\\x800\\U00098bd1\\U000667...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='툀\x800\U00098bd1\U000667cfã$', alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E text='툀\x800\U00098bd1\U000667cfã$', E ) E Unreliable test timings! 
On an initial run, this test took 488.34ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.14 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg1] ______________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = '0', right = '', alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '', BWTRLENCD({'terminator': '\x00'})), kwargs = {} initial_draws = 2, start = 11933623.096588235, result = None finish = 11933623.572772479, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=476184) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 476.18ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 6 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 476.18ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', BWTRLENCD({'termina...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='0', right='', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=BWTRLENCD({'terminator': '\x00'}), E left='0', E right='', E ) E Unreliable test timings! On an initial run, this test took 476.18ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 9.31 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg23] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = 'Ǧ\x1f\U000a6291Ê_8', right = 'Ǧ\x1f\U000a6291Ê_8' alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'V', 'P', 'T', 'I', 'A', 'Q', 'F', 'O', 'B', 'M', 'E', 'D', 'U', 'R', 'N', 'K', 'X', 'Z', 'S', 'L', 'Y', 'G', 'C', 'J'})}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('Ǧ\x1f\U000a6291Ê_8', 'Ǧ\x1f\U000a6291Ê_8', Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': Fa... 'P', 'T', 'I', 'A', 'Q', 'F', 'O', 'B', 'M', 'E', 'D', 'U', 'R', 'N', 'K', 'X', 'Z', 'S', 'L', 'Y', 'G', 'C', 'J'})})) kwargs = {}, initial_draws = 2, start = 11933632.609730255, result = None finish = 11933632.933342982, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=323613) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 323.61ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'V', 'P', 'T', 'I', 'A', 'Q', 'F', 'O', 'B', 'M', 'E', 'D', 'U', 'R', 'N', 'K', 'X', 'Z', 'S', 'L', 'Y', 'G', 'C', 'J'})}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 64 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 323.61ms, which exceeds the deadline of 200.00ms'), "args = ('Ǧ\\x1f\\U000a6291Ê_8', 'Ǧ\\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='Ǧ\x1f\U000a6291Ê_8', right='Ǧ\x1f\U000a6291Ê_8', alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'V', 'P', 'T', 'I', 'A', 'Q', 'F', 'O', 'B', 'M', 'E', 'D', 'U', 'R', 'N', 'K', 'X', 'Z', 'S', 'L', 'Y', 'G', 'C', 'J'})})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'V', 'P', 'T', 'I', 'A', 'Q', 'F', 'O', 'B', 'M', 'E', 'D', 'U', 'R', 'N', 'K', 'X', 'Z', 'S', 'L', 'Y', 'G', 'C', 'J'})}), E left='Ǧ\x1f\U000a6291Ê_8', E right='Ǧ\x1f\U000a6291Ê_8', E ) E Unreliable test timings! On an initial run, this test took 323.61ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.66 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_qval[None-Jaro] _____________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 left = '\U000bb3410', right = '\x0c\U000a7840\x04¶', alg = 'Jaro', qval = None @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000bb3410', '\x0c\U000a7840\x04¶', 'Jaro', None), kwargs = {} initial_draws = 2, start = 11933655.847324548, result = None finish = 11933656.162197577, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=314873) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 314.87ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'Jaro', qval = None @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 34 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 314.87ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000bb3410', '\\x0c\\U000...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, 
): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
_____________________________ test_simmetry[alg0] ______________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = ''
alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('0', '', ArithNCD({'base': 2, 'terminator': None, 'qval': 1}))
kwargs = {}, initial_draws = 2, start = 11933670.464473505, result = None
finish = 11933670.843801936, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=379328)
current_deadline = datetime.timedelta(microseconds=250000)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 379.33ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1})

self = 
data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 379.33ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', ArithNCD({'base': 2...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[hypothesis execute_once() source elided; identical to the listing shown for test_qval[None-Jaro] above]

E       hypothesis.errors.Flaky: Hypothesis test_simmetry(left='0', right='', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_simmetry(
E           alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}),
E           left='0',
E           right='',
E       )
E       Unreliable test timings! On an initial run, this test took 379.33ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 18.16 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_is_normalized[alg6] ___________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '\x03á\x10 ã+Ù\x87'
alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\x03á\x10 ã+Ù\x87', EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}))
kwargs = {}, initial_draws = 2, start = 11933665.134880945, result = None
finish = 11933665.567484085, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=432603)
current_deadline = datetime.timedelta(microseconds=250000)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 432.60ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})

self = 
data = ConjectureData(VALID, 43 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 432.60ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\x03á\\x10 ã+Ù\\x87', ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[hypothesis execute_once() source elided; identical to the listing shown for test_qval[None-Jaro] above]

E       hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='\x03á\x10 ã+Ù\x87', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_is_normalized(
E           alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}),
E           left='',
E           right='\x03á\x10 ã+Ù\x87',
E       )
E       Unreliable test timings! On an initial run, this test took 432.60ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.64 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_qval[None-JaroWinkler] __________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = 'Í', right = '0', alg = 'JaroWinkler', qval = None

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('Í', '0', 'JaroWinkler', None), kwargs = {}, initial_draws = 2
start = 11933672.26805696, result = None, finish = 11933672.561967185
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=293910)
current_deadline = datetime.timedelta(microseconds=250000)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 293.91ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = 'JaroWinkler', qval = None

self = 
data = ConjectureData(VALID, 8 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 293.91ms, which exceeds the deadline of 200.00ms'), "args = ('Í', '0', 'JaroWinkler', Non...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[hypothesis execute_once() source elided; identical to the listing shown for test_qval[None-Jaro] above]

E       hypothesis.errors.Flaky: Hypothesis test_qval(left='Í', right='0', alg='JaroWinkler', qval=None) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_qval(
E           alg='JaroWinkler',
E           qval=None,
E           left='Í',
E           right='0',
E       )
E       Unreliable test timings! On an initial run, this test took 293.91ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.28 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
______________________________ test_qval[1-Jaro] _______________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

alg = 'Jaro', qval = 1

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.00 seconds (0 invalid ones and 2 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_external.py:51: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(124254095564587097190136192401238286251) to this test or run pytest with --hypothesis-seed=124254095564587097190136192401238286251 to reproduce this failure.
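The FailedHealthCheck above is a different failure mode from the deadline ones: Hypothesis aborted because example generation itself was too slow on this builder, and its message names the targeted escape hatch. A sketch of applying it per test, again with a stand-in body; only the hypothesis APIs shown are real:

import hypothesis
from hypothesis import HealthCheck, strategies

# Suppress only the too_slow health check, leaving the others active,
# exactly as the error message suggests.
@hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
@hypothesis.given(left=strategies.text(), right=strategies.text())
def test_qval(left, right):
    ...  # stand-in body; the real test is at tests/test_external.py:51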
________________________ test_normalization_same[alg23] ________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

text = '\U000fdb72'
alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'K', 'D', 'E', 'F', 'Z', 'O', 'M', 'U', 'X', 'G', 'V', 'I', 'S', 'Q', 'L', 'B', 'J', 'P', 'A', 'C', 'T', 'N', 'R', 'Y'})})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U000fdb72', Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'groupe... 'D', 'E', 'F', 'Z', 'O', 'M', 'U', 'X', 'G', 'V', 'I', 'S', 'Q', 'L', 'B', 'J', 'P', 'A', 'C', 'T', 'N', 'R', 'Y'})}))
kwargs = {}, initial_draws = 1, start = 11933665.134044247, result = None
finish = 11933665.573198384, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=439154)
current_deadline = datetime.timedelta(microseconds=250000)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 439.15ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'K', 'D', 'E', 'F', 'Z', 'O', 'M', 'U', 'X', 'G', 'V', 'I', 'S', 'Q', 'L', 'B', 'J', 'P', 'A', 'C', 'T', 'N', 'R', 'Y'})})

self = 
data = ConjectureData(VALID, 9 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 439.15ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000fdb72', Editex({'matc...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[hypothesis execute_once() source elided; identical to the listing shown for test_qval[None-Jaro] above]

E       hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U000fdb72', alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'K', 'D', 'E', 'F', 'Z', 'O', 'M', 'U', 'X', 'G', 'V', 'I', 'S', 'Q', 'L', 'B', 'J', 'P', 'A', 'C', 'T', 'N', 'R', 'Y'})})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_same(
E           alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'K', 'D', 'E', 'F', 'Z', 'O', 'M', 'U', 'X', 'G', 'V', 'I', 'S', 'Q', 'L', 'B', 'J', 'P', 'A', 'C', 'T', 'N', 'R', 'Y'})}),
E           text='\U000fdb72',
E       )
E       Unreliable test timings! On an initial run, this test took 439.15ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.57 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________________ test_simmetry[alg2] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '', BZ2NCD({})), kwargs = {}, initial_draws = 2
start = 11933699.352884885, result = None, finish = 11933699.603030706
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=250146)
current_deadline = timedelta(milliseconds=200)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 250.15ms, which exceeds the deadline of 200.00ms
E           Falsifying example: test_simmetry(
E               alg=BZ2NCD({}),
E               left='',
E               right='',
E           )

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
___________________________ test_simmetry_compressor ___________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

text = '\U000fc561-³\U000de033\x0bÓ¸\U000a0b88'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_simmetry_compressor(text):

tests/test_compression/test_entropy_ncd.py:26:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U000fc561-³\U000de033\x0bÓ¸\U000a0b88',), kwargs = {}
initial_draws = 1, start = 11933688.47432505, result = None
finish = 11933688.773254976, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=298930)
current_deadline = datetime.timedelta(microseconds=250000)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 298.93ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = 
data = ConjectureData(VALID, 58 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 298.93ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000fc561-³\\U000de033\\x...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[hypothesis execute_once() source elided; identical to the listing shown for test_qval[None-Jaro] above]

E       hypothesis.errors.Flaky: Hypothesis test_simmetry_compressor(text='\U000fc561-³\U000de033\x0bÓ¸\U000a0b88') produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_simmetry_compressor(
E           text='\U000fc561-³\U000de033\x0bÓ¸\U000a0b88',
E       )
E       Unreliable test timings! On an initial run, this test took 298.93ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.59 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________________ test_simmetry[alg3] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.41 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_compression/test_common.py:36: FailedHealthCheck
_________________________ test_normalized_by_one[alg0] _________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('ù', '\x84\U000f40c4y', ArithNCD({'base': 2, 'terminator': None, 'qval': 1}))
kwargs = {}, initial_draws = 2, start = 11933716.051251018, result = None
finish = 11933716.379506046, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=328255)
current_deadline = timedelta(milliseconds=200)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 328.25ms, which exceeds the deadline of 200.00ms
E           Falsifying example: test_normalized_by_one(
E               alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}),
E               left='ù',
E               right='\x84\U000f40c4y',
E           )

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
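Decorating every affected test individually would be tedious; Hypothesis settings profiles exist for exactly this situation, where one environment (a heavily loaded mock builder) needs laxer timing rules than a developer machine. A sketch of a conftest.py doing that; the profile names and the HYPOTHESIS_PROFILE variable are choices made for this example, not anything the log or textdistance defines:

import os

from hypothesis import HealthCheck, settings

# A lax profile for package builds: no per-example deadline and no
# "data generation is too slow" health check.
settings.register_profile(
    "rpmbuild",
    deadline=None,
    suppress_health_check=[HealthCheck.too_slow],
)

# The default profile keeps Hypothesis's normal 200ms deadline for
# development runs.
settings.register_profile("dev", deadline=200)

# Select a profile from the environment, defaulting to the strict one.
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "dev"))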
_____________________________ test_simmetry[alg4] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

alg = ZLIBNCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('0', '', ZLIBNCD({})), kwargs = {}, initial_draws = 2
start = 11933717.748562764, result = None, finish = 11933718.1830397
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=434477)
current_deadline = timedelta(milliseconds=200)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 434.48ms, which exceeds the deadline of 200.00ms
E           Falsifying example: test_simmetry(
E               alg=ZLIBNCD({}),
E               left='0',
E               right='',
E           )

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
_________________________ test_idempotency_compressor __________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

text = '\U000b3a0b'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_idempotency_compressor(text):

tests/test_compression/test_entropy_ncd.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U000b3a0b',), kwargs = {}, initial_draws = 1
start = 11933707.990013827, result = None, finish = 11933708.572057975
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=582044)
current_deadline = datetime.timedelta(microseconds=250000)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 582.04ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = 
data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 582.04ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000b3a0b',), kwargs = {}...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[hypothesis execute_once() source elided; identical to the listing shown for test_qval[None-Jaro] above]

E       hypothesis.errors.Flaky: Hypothesis test_idempotency_compressor(text='\U000b3a0b') produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_idempotency_compressor(
E           text='\U000b3a0b',
E       )
E       Unreliable test timings! On an initial run, this test took 582.04ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.79 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_qval[1-JaroWinkler] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '𰦅', right = '𰥑\x848\U000c39d1\U000454c1Z', alg = 'JaroWinkler', qval = 1

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('𰦅', '𰥑\x848\U000c39d1\U000454c1Z', 'JaroWinkler', 1), kwargs = {}
initial_draws = 2, start = 11933731.119923113, result = None
finish = 11933731.782397468, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=662474)
current_deadline = datetime.timedelta(microseconds=250000)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 662.47ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = 'JaroWinkler', qval = 1

self = 
data = ConjectureData(VALID, 47 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 662.47ms, which exceeds the deadline of 200.00ms'), "args = ('𰦅', '𰥑\\x848\\U000c39d1\\U0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[hypothesis execute_once() source elided; identical to the listing shown for test_qval[None-Jaro] above]

E       hypothesis.errors.Flaky: Hypothesis test_qval(left='𰦅', right='𰥑\x848\U000c39d1\U000454c1Z', alg='JaroWinkler', qval=1) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_qval(
E           alg='JaroWinkler',
E           qval=1,
E           left='𰦅',
E           right='𰥑\x848\U000c39d1\U000454c1Z',
E       )
E       Unreliable test timings! On an initial run, this test took 662.47ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.75 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_monotonicity_compressor _________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

left = '𐲕\U000609a9£\U00039e44\x8f', right = '\x92'

    @hypothesis.given(
>       left=hypothesis.strategies.text(min_size=1),
        right=hypothesis.strategies.characters(),
    )

tests/test_compression/test_sqrt_ncd.py:38:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('𐲕\U000609a9£\U00039e44\x8f', '\x92'), kwargs = {}, initial_draws = 2
start = 11933727.059862863, result = None, finish = 11933727.363649089
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=303786)
current_deadline = datetime.timedelta(microseconds=250000)

[hypothesis timing-wrapper source elided; identical to the listing shown for test_qval[None-Jaro] above]
E           hypothesis.errors.DeadlineExceeded: Test took 303.79ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = 
data = ConjectureData(VALID, 32 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 303.79ms, which exceeds the deadline of 200.00ms'), "args = ('𐲕\\U000609a9£\\U00039e44\\x...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

[hypothesis execute_once() source elided; identical to the listing shown for test_qval[None-Jaro] above]

E       hypothesis.errors.Flaky: Hypothesis test_monotonicity_compressor(left='𐲕\U000609a9£\U00039e44\x8f', right='\x92') produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_monotonicity_compressor(
E           left='𐲕\U000609a9£\U00039e44\x8f',
E           right='\x92',
E       )
E       Unreliable test timings! On an initial run, this test took 303.79ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.53 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_distributivity_compressor ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

    @hypothesis.given(
>       left1=hypothesis.strategies.text(min_size=1),
        left2=hypothesis.strategies.text(min_size=1),
        right=hypothesis.strategies.characters(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.01 seconds (0 invalid ones and 2 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_compression/test_entropy_ncd.py:49: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(152288752121438189709293657981432689459) to this test or run pytest with --hypothesis-seed=152288752121438189709293657981432689459 to reproduce this failure.
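This health-check failure names its own two remedies: suppress the too_slow check, or pin the printed seed to replay the exact run. A minimal sketch of both, assuming direct edits to tests/test_compression/test_entropy_ncd.py; @seed, settings(suppress_health_check=...), and HealthCheck.too_slow are real Hypothesis API, while the test body is illustrative.

    import hypothesis
    from hypothesis import HealthCheck, strategies

    # Replay exactly the generation sequence from this log...
    @hypothesis.seed(152288752121438189709293657981432689459)
    # ...and stop the slow-generation health check from aborting the run.
    @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
    @hypothesis.given(
        left1=strategies.text(min_size=1),
        left2=strategies.text(min_size=1),
        right=strategies.characters(),
    )
    def test_distributivity_compressor(left1, left2, right):
        ...  # distributivity assertions would go here

As the log itself notes, the same seed can also be supplied without editing the file, via pytest --hypothesis-seed=152288752121438189709293657981432689459.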
_____________________________ test_simmetry[alg5] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '𣰟¿5\x838¯å\x1c', right = '', alg = SqrtNCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
E   hypothesis.errors.DeadlineExceeded: Test took 451.26ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
[... hypothesis/core.py frames identical to the first failure, elided ...]

E   hypothesis.errors.Flaky: Hypothesis test_simmetry(left='𣰟¿5\x838¯å\x1c', right='', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_simmetry(
E       alg=SqrtNCD({'qval': 1}),
E       left='𣰟¿5\x838¯å\x1c',
E       right='',
E   )
E   Unreliable test timings! On an initial run, this test took 451.26ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.03 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_normalized_by_one[alg1] _________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = 'Q\U0001670d\U00060896:', right = '', alg = BWTRLENCD({'terminator': '\x00'})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
E   hypothesis.errors.DeadlineExceeded: Test took 1148.04ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
[... hypothesis/core.py frames identical to the first failure, elided ...]

E   hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='Q\U0001670d\U00060896:', right='', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalized_by_one(
E       alg=BWTRLENCD({'terminator': '\x00'}),
E       left='Q\U0001670d\U00060896:',
E       right='',
E   )
E   Unreliable test timings! On an initial run, this test took 1148.04ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.13 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_normalization_range ___________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

text = '\x1a\x8d«eÖâ&2¥\x94?öôï/öµ'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_normalization_range(text):

tests/test_compression/test_entropy_ncd.py:62:
E   hypothesis.errors.DeadlineExceeded: Test took 321.95ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
[... hypothesis/core.py frames identical to the first failure, elided ...]

E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(text='\x1a\x8d«eÖâ&2¥\x94?öôï/öµ') produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       text='\x1a\x8d«eÖâ&2¥\x94?öôï/öµ',
E   )
E   Unreliable test timings! On an initial run, this test took 321.95ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.07 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________________ test_simmetry[alg6] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = "Sç'", alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
E   hypothesis.errors.DeadlineExceeded: Test took 299.24ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
[... hypothesis/core.py frames identical to the first failure, elided ...]

E   hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right="Sç'", alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_simmetry(
E       alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}),
E       left='',
E       right="Sç'",
E   )
E   Unreliable test timings! On an initial run, this test took 299.24ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 6.99 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_distributivity_compressor ________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

left1 = '𦌎\U0008075a´', left2 = 'é{\U000cf2e0', right = '𦌎\U0008075a´'

    @hypothesis.given(
>       left1=hypothesis.strategies.text(min_size=1),
        left2=hypothesis.strategies.text(min_size=1),
        right=hypothesis.strategies.text(min_size=1),
    )

tests/test_compression/test_sqrt_ncd.py:48:
E   hypothesis.errors.DeadlineExceeded: Test took 265.33ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
[... hypothesis/core.py frames identical to the first failure, elided ...]

E   hypothesis.errors.Flaky: Hypothesis test_distributivity_compressor(left1='𦌎\U0008075a´', left2='é{\U000cf2e0', right='𦌎\U0008075a´') produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_distributivity_compressor(
E       left1='𦌎\U0008075a´',
E       left2='é{\U000cf2e0',
E       right='𦌎\U0008075a´',
E   )
E   Unreliable test timings! On an initial run, this test took 265.33ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.76 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
______________________________ test_qval[2-Jaro] _______________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '<ó\U0009fe1d', right = 'Y>\r', alg = 'Jaro', qval = 2

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
E   hypothesis.errors.DeadlineExceeded: Test took 276.07ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
[... hypothesis/core.py frames identical to the first failure, elided ...]

E   hypothesis.errors.Flaky: Hypothesis test_qval(left='<ó\U0009fe1d', right='Y>\r', alg='Jaro', qval=2) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_qval(
E       alg='Jaro',
E       qval=2,
E       left='<ó\U0009fe1d',
E       right='Y>\r',
E   )
E   Unreliable test timings! On an initial run, this test took 276.07ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.88 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
__________________________ test_compare_with_tversky ___________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = ''

    @hypothesis.given(
>       left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_token/test_jaccard.py:29:
E   hypothesis.errors.DeadlineExceeded: Test took 332.74ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
[... hypothesis/core.py frames identical to the first failure, elided ...]

E   hypothesis.errors.Flaky: Hypothesis test_compare_with_tversky(left='0', right='') produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_compare_with_tversky(
E       left='0',
E       right='',
E   )
E   Unreliable test timings! On an initial run, this test took 332.74ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.28 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_normalized_by_one[alg2] _________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '', alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
E   hypothesis.errors.DeadlineExceeded: Test took 389.07ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
[... hypothesis/core.py frames identical to the first failure, elided ...]

E   hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='', alg=BZ2NCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalized_by_one(
E       alg=BZ2NCD({}),
E       left='',
E       right='',
E   )
E   Unreliable test timings! On an initial run, this test took 389.07ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 30.51 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_simmetry_compressor ___________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

text = 'yà61'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_simmetry_compressor(text):

tests/test_compression/test_sqrt_ncd.py:25:
E   hypothesis.errors.DeadlineExceeded: Test took 351.07ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:
[... hypothesis/core.py frames identical to the first failure, elided ...]

E   hypothesis.errors.Flaky: Hypothesis test_simmetry_compressor(text='yà61') produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_simmetry_compressor(
E       text='yà61',
E   )
E   Unreliable test timings! On an initial run, this test took 351.07ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.39 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_normalization_range ___________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 text = '\x9f)\U0004182d\U00096ad3' @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_normalization_range(text): tests/test_compression/test_sqrt_ncd.py:59: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x9f)\U0004182d\U00096ad3',), kwargs = {}, initial_draws = 1 start = 11933820.728792407, result = None, finish = 11933821.038208932 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=309417) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 309.42ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given(text=hypothesis.strategies.text(min_size=1)) > def test_normalization_range(text): tests/test_compression/test_sqrt_ncd.py:59: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 44 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 309.42ms, which exceeds the deadline of 200.00ms'), "args = ('\\x9f)\\U0004182d\\U00096ad...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. 
with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(text='\x9f)\U0004182d\U00096ad3') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E text='\x9f)\U0004182d\U00096ad3', E ) E Unreliable test timings! On an initial run, this test took 309.42ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.31 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
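The fix the error message itself proposes is to lift the per-example deadline on the affected tests. A hedged sketch using Hypothesis's documented settings API; the test body is a hypothetical stand-in, not the actual textdistance property:

    import hypothesis
    from hypothesis import settings, strategies

    @settings(deadline=None)                              # no per-example time limit
    @hypothesis.given(text=strategies.text(min_size=1))
    def test_symmetry_placeholder(text):
        assert text == text                               # stand-in assertion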
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_normalized_by_one[alg3] _________________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = '\U00035d2akÛ~\x9f', right = 'eþ', alg = RLENCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U00035d2akÛ~\x9f', 'eþ', RLENCD({'qval': 1})), kwargs = {} initial_draws = 2, start = 11933824.86740696, result = None finish = 11933825.363122504, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=495716) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 495.72ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = RLENCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 33 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 495.72ms, which exceeds the deadline of 200.00ms'), "args = ('\\U00035d2akÛ~\\x9f', 'eþ',...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='\U00035d2akÛ~\x9f', right='eþ', alg=RLENCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=RLENCD({'qval': 1}), E left='\U00035d2akÛ~\x9f', E right='eþ', E ) E Unreliable test timings! On an initial run, this test took 495.72ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.99 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg0] ___________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = '', right = '_\x93D³' alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '_\x93D³', ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) kwargs = {}, initial_draws = 2, start = 11933814.058731332, result = None finish = 11933814.417887963, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=359157) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 359.16ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 23 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 359.16ms, which exceeds the deadline of 200.00ms'), "args = ('', '_\\x93D³', ArithNCD({'b...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='_\x93D³', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_is_normalized( E alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}), E left='', E right='_\x93D³', E ) E Unreliable test timings! On an initial run, this test took 359.16ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 21.06 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_compare_with_tversky_as_set _______________________ [gw7] linux -- Python 3.12.0 /usr/bin/python3 left = '\U000a3d4d\U0001c0149\x1c', right = '\x02Ò\U00091e2a-' @hypothesis.given( > left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_token/test_jaccard.py:39: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000a3d4d\U0001c0149\x1c', '\x02Ò\U00091e2a-'), kwargs = {} initial_draws = 2, start = 11933823.021229804, result = None finish = 11933823.324559528, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=303330) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 303.33ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: @hypothesis.given( > left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_token/test_jaccard.py:39: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 46 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 303.33ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000a3d4d\\U0001c0149\\x1...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_compare_with_tversky_as_set(left='\U000a3d4d\U0001c0149\x1c', right='\x02Ò\U00091e2a-') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_compare_with_tversky_as_set( E left='\U000a3d4d\U0001c0149\x1c', E right='\x02Ò\U00091e2a-', E ) E Unreliable test timings! On an initial run, this test took 303.33ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.58 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg1] ___________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = 'á', right = 'á', alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('á', 'á', BWTRLENCD({'terminator': '\x00'})), kwargs = {} initial_draws = 2, start = 11933847.267704584, result = None finish = 11933847.56110151, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=293397) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 293.40ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = BWTRLENCD({'terminator': '\x00'}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 8 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 293.40ms, which exceeds the deadline of 200.00ms'), "args = ('á', 'á', BWTRLENCD({'termin...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='á', right='á', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_is_normalized( E alg=BWTRLENCD({'terminator': '\x00'}), E left='á', E right='á', E ) E Unreliable test timings! On an initial run, this test took 293.40ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.32 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg2] ___________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 alg = BZ2NCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '', BZ2NCD({})), kwargs = {}, initial_draws = 2 start = 11933880.27675912, result = None, finish = 11933880.827484965 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=550726) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 550.73ms, which exceeds the deadline of 200.00ms E Falsifying example: test_is_normalized( E alg=BZ2NCD({}), E left='0', E right='', E ) /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded _________________________ test_normalized_by_one[alg5] _________________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = '', right = '0', alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '0', SqrtNCD({'qval': 1})), kwargs = {}, initial_draws = 2 start = 11933873.018900497, result = None, finish = 11933873.362708824 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=343808) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 343.81ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 5 bytes, frozen), print_example = True is_final = True
expected_failure = (DeadlineExceeded('Test took 343.81ms, which exceeds the deadline of 200.00ms'), "args = ('', '0', SqrtNCD({'qval': 1}...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." 
% ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='0', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=SqrtNCD({'qval': 1}), E left='', E right='0', E ) E Unreliable test timings! On an initial run, this test took 343.81ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.07 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________________ test_normalized_by_one[alg6] _________________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = '/06', right = "0'&", alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('/06', "0'&", EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) kwargs = {}, initial_draws = 2, start = 11933913.740834191, result = None finish = 11933914.164122729, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=423289) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 423.29ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 27 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 423.29ms, which exceeds the deadline of 200.00ms'), 'args = (\'/06\', "0\'&", EntropyNCD(...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='/06', right="0'&", alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalized_by_one( E alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}), E left='/06', E right="0'&", E ) E Unreliable test timings! On an initial run, this test took 423.29ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.28 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg4] ___________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = 'r¦ú\x01æ¿\x1c', right = '㦾', alg = ZLIBNCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('r¦ú\x01æ¿\x1c', '㦾', ZLIBNCD({})), kwargs = {}, initial_draws = 2 start = 11933910.22098929, result = None, finish = 11933910.57964932 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=358660) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 358.66ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ZLIBNCD({}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 43 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 358.66ms, which exceeds the deadline of 200.00ms'), "args = ('r¦ú\\x01æ¿\\x1c', '㦾', ZLIB...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='r¦ú\x01æ¿\x1c', right='㦾', alg=ZLIBNCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_is_normalized( E alg=ZLIBNCD({}), E left='r¦ú\x01æ¿\x1c', E right='㦾', E ) E Unreliable test timings! 
On an initial run, this test took 358.66ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.47 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_is_normalized[alg5] ___________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = '', right = '', alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '', SqrtNCD({'qval': 1})), kwargs = {}, initial_draws = 2 start = 11933928.469449855, result = None, finish = 11933928.775660183 internal_draw_time = 0, runtime = datetime.timedelta(microseconds=306210) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 306.21ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SqrtNCD({'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:48: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 306.21ms, which exceeds the deadline of 200.00ms'), "args = ('', '', SqrtNCD({'qval': 1})...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_is_normalized( E alg=SqrtNCD({'qval': 1}), E left='', E right='', E ) E Unreliable test timings! On an initial run, this test took 306.21ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.33 ms, which did not. 
=========================== short test summary info ============================
FAILED tests/test_common.py::test_normalization_range[alg0] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg0] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg0] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg12] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg1] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg1] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg2] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg12] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg13] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg2] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg3] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg4] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg5] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg3] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg13] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg14] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg3] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg14] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg15] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg15] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg6] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg4] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg16] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg16] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg7] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg6] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_same[alg7] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg8] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg17] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg6] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg17] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg18] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg18] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg8] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_same[alg9] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg9] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg7] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg19] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg11] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg21] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg9] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg21] - hypothesis.er...
FAILED tests/test_external.py::test_qval[2-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_common.py::test_normalization_range[alg10] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg22] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg21] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg11] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg23] - hypothesis.err...
FAILED tests/test_external.py::test_qval[3-Jaro] - hypothesis.errors.Flaky: H...
FAILED tests/test_common.py::test_normalization_same[alg22] - hypothesis.erro...
FAILED tests/test_compression/test_common.py::test_simmetry[alg1] - hypothesi...
FAILED tests/test_common.py::test_normalization_by_one[alg23] - hypothesis.er...
FAILED tests/test_external.py::test_qval[None-Jaro] - hypothesis.errors.Flaky...
FAILED tests/test_compression/test_common.py::test_simmetry[alg0] - hypothesi...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg6] - hypo...
FAILED tests/test_external.py::test_qval[None-JaroWinkler] - hypothesis.error...
FAILED tests/test_external.py::test_qval[1-Jaro] - hypothesis.errors.FailedHe...
FAILED tests/test_common.py::test_normalization_same[alg23] - hypothesis.erro...
FAILED tests/test_compression/test_common.py::test_simmetry[alg2] - hypothesi...
FAILED tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor
FAILED tests/test_compression/test_common.py::test_simmetry[alg3] - hypothesi...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg0] - ...
FAILED tests/test_compression/test_common.py::test_simmetry[alg4] - hypothesi...
FAILED tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor
FAILED tests/test_external.py::test_qval[1-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor
FAILED tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor
FAILED tests/test_compression/test_common.py::test_simmetry[alg5] - hypothesi...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg1] - ...
FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range
FAILED tests/test_compression/test_common.py::test_simmetry[alg6] - hypothesi...
FAILED tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor
FAILED tests/test_external.py::test_qval[2-Jaro] - hypothesis.errors.Flaky: H...
FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky - hypothes...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg2] - ...
FAILED tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor - hy...
FAILED tests/test_compression/test_sqrt_ncd.py::test_normalization_range - hy...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg3] - ...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg0] - hypo...
FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set - h...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg1] - hypo...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg2] - hypo...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg5] - ...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6] - ...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg4] - hypo...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg5] - hypo...
================= 86 failed, 326 passed in 1052.04s (0:17:32) ==================

RPM build errors:
error: Bad exit status from /var/tmp/rpm-tmp.e3J1OY (%check)
    Bad exit status from /var/tmp/rpm-tmp.e3J1OY (%check)
Child return code was: 1
EXCEPTION: [Error('Command failed: \n # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec\n', 1)]
Traceback (most recent call last):
  File "/usr/lib/python3.11/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
             ^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mockbuild/util.py", line 597, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed:
 # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec
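[packager note] The %check failure (86 failed, 326 passed) appears to be dominated by hypothesis.errors.Flaky and deadline-related errors from a heavily loaded builder, not by behavioral regressions in textdistance. If editing individual tests is impractical for packaging, Hypothesis settings profiles can relax the deadline suite-wide; a hypothetical conftest.py sketch (the profile name "mock-build" is invented here):

    # conftest.py: register and load a no-deadline Hypothesis profile so
    # property-based tests are not failed for timing on slow build hosts.
    from hypothesis import settings

    settings.register_profile("mock-build", deadline=None)
    settings.load_profile("mock-build")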