Mock Version: 5.0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2637424-66178/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1711584000
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.src.rpm
Child return code was: 0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2637424-66178/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1711497600
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.jhFxcW
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
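[Editor's note: the sed call above is the spec's %prep patch; it comments out any setup.py line that mentions isort, so the build does not depend on an import-sorting tool it does not need. A minimal Python sketch of the same substitution; the sample line is hypothetical, not taken from textdistance's actual setup.py:]

    import re

    # Equivalent of: sed -r 's/^([[:blank:]]*)(.*\bisort\b)/\1# \2/'
    # Group 1 keeps the indentation, group 2 is everything up to and
    # including the word 'isort'; the replacement re-inserts both after '# '.
    pattern = re.compile(r'^([ \t]*)(.*\bisort\b)')

    line = "    'isort',"                  # hypothetical setup.py line
    print(pattern.sub(r'\1# \2', line))    # -> "    # 'isort',"

[Any text after the match, such as the trailing quote and comma here, is left in place by both sed and re.sub.]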
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.dHvLxS
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
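[Editor's note: the -x flags name the extras whose dependencies should be turned into BuildRequires (here the test extra plus the Jaro, JaroWinkler and Levenshtein backend extras); requirements guarded by any other extra are later logged as "Ignoring alien requirement". The selection works through standard PEP 508 environment markers, which can be reproduced with the packaging library; a sketch of the idea, not the macro's actual code:]

    from packaging.markers import default_environment
    from packaging.requirements import Requirement

    req = Requirement("rapidfuzz >=2.6.0 ; extra == 'jaro'")

    env = default_environment()
    env['extra'] = 'jaro'                # an extra requested via -x
    print(req.marker.evaluate(env))      # True -> eligible to become a BuildRequires

    env['extra'] = 'benchmark'           # an extra that was not requested
    print(req.marker.evaluate(env))      # False -> an "alien" requirement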
Handling setuptools >= 40.8 from default build backend
Requirement not satisfied: setuptools >= 40.8
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement not satisfied: setuptools >= 40.8
Handling wheel from default build backend
Requirement not satisfied: wheel
Exiting dependency generation pass: build backend
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv '*.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
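[Editor's note: exit code 11 from rpmbuild -br signals that new dynamic BuildRequires were written into the .buildreqs.nosrc.rpm; mock reacts by installing them and re-running the same command until a pass finishes without producing new requirements. Roughly, and glossing over mock's real error handling (install_buildrequires is a hypothetical stand-in for the dnf transaction logged in root.log):]

    import subprocess

    MISSING_BUILDREQUIRES = 11   # rpmbuild -br: new BuildRequires were generated

    def install_buildrequires():
        # Hypothetical stand-in for the dnf step mock records in root.log:
        # install the dependencies of the freshly written nosrc.rpm.
        subprocess.check_call(['dnf', 'builddep', '-y',
            '/builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm'])

    cmd = ['rpmbuild', '-br', '--noclean', '--target', 'noarch', '--nodeps',
           '/builddir/build/SPECS/python-textdistance.spec']
    while subprocess.call(cmd) == MISSING_BUILDREQUIRES:
        install_buildrequires()   # then re-run rpmbuild -br; 0 means done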
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2637424-66178/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1711497600
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.8gQZ26
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.VOKXd2
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
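[Editor's note: for every requirement that survives the extras filter, the script checks the chroot's installed distributions and prints "Requirement satisfied" or "Requirement not satisfied"; unsatisfied ones are written to the --output file as BuildRequires. The check is essentially importlib.metadata plus a packaging specifier match; a simplified sketch, not the macro's actual code:]

    from importlib.metadata import version, PackageNotFoundError
    from packaging.requirements import Requirement
    from packaging.version import Version

    def satisfied(spec: str) -> bool:
        req = Requirement(spec)
        try:
            installed = Version(version(req.name))
        except PackageNotFoundError:
            return False                     # not installed in the chroot at all
        return installed in req.specifier    # e.g. 68.2.2 matches '>= 40.8'

    print(satisfied('setuptools >= 40.8'))   # True once setuptools 68.2.2 is in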
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
creating textdistance.egg-info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
writing manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro'
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'jaro'
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler'
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'jarowinkler'
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein'
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'levenshtein'
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks'
Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmarks'
Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmarks'
Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks'
Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmarks'
Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'common'
Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'common'
Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'common'
Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'common'
Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'common'
Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extra'
Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extra'
Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extra'
Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extra'
Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extra'
Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: jellyfish ; extra == 'extras'
Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'extras'
Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: Levenshtein ; extra == 'extras'
Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: pyxDamerauLevenshtein ; extra == 'extras'
Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: rapidfuzz >=2.6.0 ; extra == 'extras'
Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: twine ; extra == 'lint'
Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: mypy ; extra == 'lint'
Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8 ; extra == 'lint'
Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: types-tabulate ; extra == 'lint'
Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-blind-except ; extra == 'lint'
Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-bugbear ; extra == 'lint'
Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-commas ; extra == 'lint'
Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-logging-format ; extra == 'lint'
Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-mutable ; extra == 'lint'
Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-pep3101 ; extra == 'lint'
Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-quotes ; extra == 'lint'
Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-string-format ; extra == 'lint'
Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint'
Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pep8-naming ; extra == 'lint'
Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: hypothesis ; extra == 'test'
Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement not satisfied: numpy ; extra == 'test'
Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3)
+ cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires
+ rm -rfv textdistance-4.6.1.dist-info/
removed 'textdistance-4.6.1.dist-info/LICENSE'
removed 'textdistance-4.6.1.dist-info/top_level.txt'
removed 'textdistance-4.6.1.dist-info/METADATA'
removed directory 'textdistance-4.6.1.dist-info/'
+ RPM_EC=0
++ jobs -p
+ exit 0
Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm
Child return code was: 11
Dynamic buildrequires detected
Going to install missing buildrequires. See root.log for details.
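[Editor's note: every rpmbuild pass logs setting SOURCE_DATE_EPOCH=1711497600, the timestamp rpmbuild clamps file and metadata times to for reproducible builds; in Fedora it is derived from the spec's latest %changelog date. Decoding the value, stdlib only:]

    from datetime import datetime, timezone

    # 1711497600 -> 2024-03-27 00:00:00+00:00 (UTC midnight)
    print(datetime.fromtimestamp(1711497600, tz=timezone.utc))

[The initial -bs pass above logged 1711584000 instead, which decodes to 2024-03-28, one day later.]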
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2637424-66178/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1711497600
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.gG5856
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf textdistance-4.6.1
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/textdistance-4.6.1.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd textdistance-4.6.1
+ rm -rf /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/textdistance-4.6.1-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ sed -r -i 's/^([[:blank:]]*)(.*\b(isort)\b)/\1# \2/' setup.py
+ RPM_EC=0
++ jobs -p
+ exit 0
Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.qZi5MN
+ umask 022
+ cd /builddir/build/BUILD
+ cd textdistance-4.6.1
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ echo pyproject-rpm-macros
+ echo python3-devel
+ echo 'python3dist(pip) >= 19'
+ echo 'python3dist(packaging)'
+ '[' -f pyproject.toml ']'
+ echo '(python3dist(tomli) if python3-devel < 3.11)'
+ rm -rfv '*.dist-info/'
+ '[' -f /usr/bin/python3 ']'
+ mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ echo -n
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir
+ RPM_TOXENV=py312
+ HOSTNAME=rpmbuild
+ /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein
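[Editor's note: with rapidfuzz, Levenshtein, jellyfish, hypothesis and numpy installed by the previous rounds, every requirement in the pass below should resolve. The Jaro/JaroWinkler/Levenshtein extras exist so textdistance can delegate to those faster C-backed libraries at run time, falling back to pure Python otherwise; purely for orientation, a hedged usage sketch following textdistance's documented API:]

    import textdistance

    # Pure-Python implementations by default; accelerated backends
    # (rapidfuzz, Levenshtein, jellyfish) are used when installed.
    print(textdistance.levenshtein('kitten', 'sitting'))    # 3
    print(textdistance.jaro_winkler('MARTHA', 'MARHTA'))    # ~0.961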
Handling setuptools >= 40.8 from default build backend
Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2)
Handling wheel from default build backend
Requirement satisfied: wheel (installed: wheel 0.41.2)
running egg_info
creating textdistance.egg-info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
writing manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
Handling wheel from get_requires_for_build_wheel
Requirement satisfied: wheel (installed: wheel 0.41.2)
running dist_info
writing textdistance.egg-info/PKG-INFO
writing dependency_links to textdistance.egg-info/dependency_links.txt
writing requirements to textdistance.egg-info/requires.txt
writing top-level names to textdistance.egg-info/top_level.txt
reading manifest file 'textdistance.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
adding license file 'LICENSE'
writing manifest file 'textdistance.egg-info/SOURCES.txt'
creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info'
Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein'
Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein'
Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein'
Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming'
Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'hamming'
Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'hamming'
Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7)
Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0)
Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7)
Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0)
Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7)
Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance)
Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0)
Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'all'
Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'all'
Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'all'
Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all'
Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all'
Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'all'
Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'all'
Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'all'
Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'all'
Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmark'
Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmark'
Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmark'
Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark'
Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark'
Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: distance ; extra == 'benchmark'
Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: pylev ; extra == 'benchmark'
Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: py-stringmatching ; extra == 'benchmark'
Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: tabulate ; extra == 'benchmark'
Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: jellyfish ; extra == 'benchmarks'
Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: numpy ; extra == 'benchmarks'
Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance)
Ignoring alien requirement: Levenshtein ; extra == 'benchmarks'
pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'common' Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'common' Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'common' Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common' Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common' Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extra' Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extra' Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extra' Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra' Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra' Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extras' Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extras' Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extras' Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras' Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras' Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) 
Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3) + cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 
-Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) 
running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'dameraulevenshtein' (installed: jellyfish 0.9.0) Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' (installed: pyxDamerauLevenshtein 1.7.1) Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist 
(textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; 
extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'common' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'common' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'common' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'common' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'common' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extra' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extra' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extra' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extra' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extra' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extras' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extras' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extras' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extras' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extras' (installed: rapidfuzz 2.13.7) Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien 
requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3) + cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-textdistance-4.6.1-1.fc40.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. 
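The exit code 11 from rpmbuild -br is the expected signal that %generate_buildrequires produced a fresh set of BuildRequires: mock installs them (see root.log) and then re-runs the build. In the hook output above, each Requires-Dist entry from the generated metadata is checked against the extras requested with -x; entries guarded by an extra that was not requested are logged as "Ignoring alien requirement", while entries for requested extras are resolved against the installed package set ("Requirement satisfied: ... (installed: ...)"). Extra names evidently compare case-insensitively, which is why -x Jaro and -x DamerauLevenshtein select the extras published as 'jaro' and 'dameraulevenshtein'. Below is a minimal sketch of that per-requirement decision using the packaging library; the requested-extras set, the sample entries, and the output wording for the not-installed case are illustrative, not lifted from pyproject_buildrequires.py:

    from importlib.metadata import PackageNotFoundError, version
    from packaging.requirements import Requirement

    # Extras requested via -x, lowercased because extra names compare
    # case-insensitively (cf. -x DamerauLevenshtein matching the
    # 'dameraulevenshtein' extra later in this log).
    requested = {e.lower() for e in ("test", "Jaro", "JaroWinkler", "Levenshtein")}

    def handle(entry: str) -> str:
        req = Requirement(entry)
        # A requirement whose marker matches none of the requested extras
        # is "alien" and gets skipped; an unmarked requirement always applies.
        if req.marker is not None and not any(
            req.marker.evaluate({"extra": e}) for e in requested
        ):
            return f"Ignoring alien requirement: {entry}"
        try:
            return f"Requirement satisfied: {entry} (installed: {req.name} {version(req.name)})"
        except PackageNotFoundError:
            # Not installed yet: would be emitted as a BuildRequires line for
            # mock to install on the next pass (format illustrative).
            return f"python3dist({req.name.lower()}) {req.specifier}".rstrip()

    for entry in ("rapidfuzz >=2.6.0 ; extra == 'jaro'",
                  "tabulate ; extra == 'benchmark'"):
        print(handle(entry))

Running this with the extras above prints a "Requirement satisfied" line for the rapidfuzz entry and an "Ignoring alien requirement" line for the tabulate entry, mirroring the pattern in the log.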
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'], chrootPath='/var/lib/mock/f40-build-2637424-66178/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1711497600 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.wQRQHu + umask 022 + cd /builddir/build/BUILD + cd textdistance-4.6.1 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security 
-Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x test,Jaro,JaroWinkler,Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'dameraulevenshtein' Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 
'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; 
extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'common' Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'common' Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'common' Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'common' Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'common' Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extra' Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extra' Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extra' Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extra' Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extra' Handling jellyfish ; extra == 'extras' from hook 
generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'extras' Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'extras' Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'extras' Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'extras' Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'extras' Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3) + cat 
/builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(tomli) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + echo -n + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + RPM_TOXENV=py312 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires -x 
extras -x common -x extra -x test -x DamerauLevenshtein -x Jaro -x JaroWinkler -x Levenshtein Handling setuptools >= 40.8 from default build backend Requirement satisfied: setuptools >= 40.8 (installed: setuptools 68.2.2) Handling wheel from default build backend Requirement satisfied: wheel (installed: wheel 0.41.2) running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.41.2) running dist_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/textdistance-4.6.1.dist-info' Handling rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'dameraulevenshtein' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'dameraulevenshtein' (installed: jellyfish 0.9.0) Handling pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'dameraulevenshtein' (installed: pyxDamerauLevenshtein 1.7.1) Handling Levenshtein ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'hamming' Handling jellyfish ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'hamming' Handling distance ; extra == 'hamming' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'hamming' Handling rapidfuzz >=2.6.0 ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jaro' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'jaro' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'jaro' (installed: Levenshtein 0.21.0) Handling rapidfuzz >=2.6.0 ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'jarowinkler' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'jarowinkler' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'jarowinkler' (installed: jellyfish 0.9.0) Handling rapidfuzz >=2.6.0 ; extra == 'levenshtein' from hook generated metadata: 
Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'levenshtein' (installed: rapidfuzz 2.13.7) Handling Levenshtein ; extra == 'levenshtein' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'levenshtein' (installed: Levenshtein 0.21.0) Handling jellyfish ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'all' Handling numpy ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'all' Handling Levenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'all' Handling pyxDamerauLevenshtein ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'all' Handling rapidfuzz >=2.6.0 ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'all' Handling distance ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'all' Handling pylev ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'all' Handling py-stringmatching ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'all' Handling tabulate ; extra == 'all' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'all' Handling jellyfish ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmark' Handling numpy ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmark' Handling Levenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmark' Handling pyxDamerauLevenshtein ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmark' Handling rapidfuzz >=2.6.0 ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmark' Handling distance ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmark' Handling pylev ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmark' Handling py-stringmatching ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmark' Handling tabulate ; extra == 'benchmark' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmark' Handling jellyfish ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: jellyfish ; extra == 'benchmarks' Handling numpy ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: numpy ; extra == 'benchmarks' Handling Levenshtein ; 
extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: Levenshtein ; extra == 'benchmarks' Handling pyxDamerauLevenshtein ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pyxDamerauLevenshtein ; extra == 'benchmarks' Handling rapidfuzz >=2.6.0 ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: rapidfuzz >=2.6.0 ; extra == 'benchmarks' Handling distance ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: distance ; extra == 'benchmarks' Handling pylev ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pylev ; extra == 'benchmarks' Handling py-stringmatching ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: py-stringmatching ; extra == 'benchmarks' Handling tabulate ; extra == 'benchmarks' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: tabulate ; extra == 'benchmarks' Handling jellyfish ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'common' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'common' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'common' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'common' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'common' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'common' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extra' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extra' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra == 'extra' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extra' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extra' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extra' (installed: rapidfuzz 2.13.7) Handling jellyfish ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: jellyfish ; extra == 'extras' (installed: jellyfish 0.9.0) Handling numpy ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'extras' (installed: numpy 1.26.0) Handling Levenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: Levenshtein ; extra 
== 'extras' (installed: Levenshtein 0.21.0) Handling pyxDamerauLevenshtein ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pyxDamerauLevenshtein ; extra == 'extras' (installed: pyxDamerauLevenshtein 1.7.1) Handling rapidfuzz >=2.6.0 ; extra == 'extras' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: rapidfuzz >=2.6.0 ; extra == 'extras' (installed: rapidfuzz 2.13.7) Handling twine ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: twine ; extra == 'lint' Handling mypy ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: mypy ; extra == 'lint' Handling flake8 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8 ; extra == 'lint' Handling types-tabulate ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: types-tabulate ; extra == 'lint' Handling flake8-blind-except ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-blind-except ; extra == 'lint' Handling flake8-bugbear ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-bugbear ; extra == 'lint' Handling flake8-commas ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-commas ; extra == 'lint' Handling flake8-logging-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-logging-format ; extra == 'lint' Handling flake8-mutable ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-mutable ; extra == 'lint' Handling flake8-pep3101 ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-pep3101 ; extra == 'lint' Handling flake8-quotes ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-quotes ; extra == 'lint' Handling flake8-string-format ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-string-format ; extra == 'lint' Handling flake8-tidy-imports ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: flake8-tidy-imports ; extra == 'lint' Handling pep8-naming ; extra == 'lint' from hook generated metadata: Requires-Dist (textdistance) Ignoring alien requirement: pep8-naming ; extra == 'lint' Handling hypothesis ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: hypothesis ; extra == 'test' (installed: hypothesis 6.82.0) Handling numpy ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: numpy ; extra == 'test' (installed: numpy 1.26.0) Handling pytest ; extra == 'test' from hook generated metadata: Requires-Dist (textdistance) Requirement satisfied: pytest ; extra == 'test' (installed: pytest 7.4.3) + cat /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-buildrequires + rm -rfv textdistance-4.6.1.dist-info/ removed 'textdistance-4.6.1.dist-info/LICENSE' removed 'textdistance-4.6.1.dist-info/top_level.txt' removed 'textdistance-4.6.1.dist-info/METADATA' removed directory 'textdistance-4.6.1.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0
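With the generated BuildRequires installed, the %generate_buildrequires phase above runs once more and this time every requested requirement resolves (the 'dameraulevenshtein', 'common', 'extra', and 'extras' entries are now satisfied), so the build proceeds to %build. The repeated "running egg_info" / "running dist_info" blocks are the setuptools backend's PEP 517 hooks being driven in-process: get_requires_for_build_wheel first, then prepare_metadata_for_build_wheel, whose *.dist-info/METADATA supplies the Requires-Dist entries being filtered. A minimal sketch of those two calls, assuming the setuptools backend and the unpacked source tree (here /builddir/build/BUILD/textdistance-4.6.1) as the working directory:

    import tempfile
    from setuptools import build_meta

    # Backend-declared build dependencies; the "Handling wheel from
    # get_requires_for_build_wheel" lines above came from this hook.
    print(build_meta.get_requires_for_build_wheel())  # e.g. ['wheel']

    # Writes textdistance-4.6.1.dist-info into the given directory and
    # returns its name; METADATA inside it carries the Requires-Dist entries.
    with tempfile.TemporaryDirectory() as tmp:
        print(build_meta.prepare_metadata_for_build_wheel(tmp))

The %build phase that follows hands the third hook, build_wheel, to pip via pyproject_wheel.py, which is why the log switches to pip-style progress lines ("Preparing metadata (pyproject.toml)", "Building wheel for textdistance (pyproject.toml)") and a pip-modern-metadata-* temporary directory.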
RPM_EC=0 ++ jobs -p + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.xhfD8e + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 + mkdir -p /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_wheel.py /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir Processing /builddir/build/BUILD/textdistance-4.6.1 Preparing metadata (pyproject.toml): started Running command Preparing metadata (pyproject.toml) running dist_info creating /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-09nbyg3x/textdistance.egg-info writing /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-09nbyg3x/textdistance.egg-info/PKG-INFO writing dependency_links to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-09nbyg3x/textdistance.egg-info/dependency_links.txt writing requirements to 
/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-09nbyg3x/textdistance.egg-info/requires.txt writing top-level names to /builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-09nbyg3x/textdistance.egg-info/top_level.txt writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-09nbyg3x/textdistance.egg-info/SOURCES.txt' reading manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-09nbyg3x/textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-09nbyg3x/textdistance.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-modern-metadata-09nbyg3x/textdistance-4.6.1.dist-info' Preparing metadata (pyproject.toml): finished with status 'done' Building wheels for collected packages: textdistance Building wheel for textdistance (pyproject.toml): started Running command Building wheel for textdistance (pyproject.toml) running bdist_wheel running build running build_py creating build creating build/lib creating build/lib/textdistance copying textdistance/benchmark.py -> build/lib/textdistance copying textdistance/libraries.py -> build/lib/textdistance copying textdistance/__init__.py -> build/lib/textdistance copying textdistance/utils.py -> build/lib/textdistance creating build/lib/textdistance/algorithms copying textdistance/algorithms/vector_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/compression_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/simple.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/sequence_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/base.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/edit_based.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/types.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/__init__.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/phonetic.py -> build/lib/textdistance/algorithms copying textdistance/algorithms/token_based.py -> build/lib/textdistance/algorithms copying textdistance/libraries.json -> build/lib/textdistance installing to build/bdist.linux-riscv64/wheel running install running install_lib creating build/bdist.linux-riscv64 creating build/bdist.linux-riscv64/wheel creating build/bdist.linux-riscv64/wheel/textdistance creating build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/vector_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/compression_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/simple.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/sequence_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/base.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/edit_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/types.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying 
build/lib/textdistance/algorithms/__init__.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/phonetic.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/algorithms/token_based.py -> build/bdist.linux-riscv64/wheel/textdistance/algorithms copying build/lib/textdistance/libraries.json -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/benchmark.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/libraries.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/__init__.py -> build/bdist.linux-riscv64/wheel/textdistance copying build/lib/textdistance/utils.py -> build/bdist.linux-riscv64/wheel/textdistance running install_egg_info running egg_info writing textdistance.egg-info/PKG-INFO writing dependency_links to textdistance.egg-info/dependency_links.txt writing requirements to textdistance.egg-info/requires.txt writing top-level names to textdistance.egg-info/top_level.txt reading manifest file 'textdistance.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' adding license file 'LICENSE' writing manifest file 'textdistance.egg-info/SOURCES.txt' Copying textdistance.egg-info to build/bdist.linux-riscv64/wheel/textdistance-4.6.1-py3.12.egg-info running install_scripts creating build/bdist.linux-riscv64/wheel/textdistance-4.6.1.dist-info/WHEEL creating '/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir/pip-wheel-syym5bqq/.tmp-p4dnp4h_/textdistance-4.6.1-py3-none-any.whl' and adding 'build/bdist.linux-riscv64/wheel' to it adding 'textdistance/__init__.py' adding 'textdistance/benchmark.py' adding 'textdistance/libraries.json' adding 'textdistance/libraries.py' adding 'textdistance/utils.py' adding 'textdistance/algorithms/__init__.py' adding 'textdistance/algorithms/base.py' adding 'textdistance/algorithms/compression_based.py' adding 'textdistance/algorithms/edit_based.py' adding 'textdistance/algorithms/phonetic.py' adding 'textdistance/algorithms/sequence_based.py' adding 'textdistance/algorithms/simple.py' adding 'textdistance/algorithms/token_based.py' adding 'textdistance/algorithms/types.py' adding 'textdistance/algorithms/vector_based.py' adding 'textdistance-4.6.1.dist-info/LICENSE' adding 'textdistance-4.6.1.dist-info/METADATA' adding 'textdistance-4.6.1.dist-info/WHEEL' adding 'textdistance-4.6.1.dist-info/top_level.txt' adding 'textdistance-4.6.1.dist-info/RECORD' removing build/bdist.linux-riscv64/wheel Building wheel for textdistance (pyproject.toml): finished with status 'done' Created wheel for textdistance: filename=textdistance-4.6.1-py3-none-any.whl size=31018 sha256=4a2ae4c434c44d42211e7785e7400eae8f9e006afbb6162cc51edf195351c0b5 Stored in directory: /builddir/.cache/pip/wheels/af/08/72/d6baf94a0831066222f63a4e4a469ea938a661b7e8974b7b68 Successfully built textdistance + RPM_EC=0 ++ jobs -p + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.GLAcb2 + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch '!=' / ']' + rm -rf /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch ++ dirname /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch + mkdir -p /builddir/build/BUILDROOT + mkdir /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int 
-Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 ++ xargs basename --multiple ++ ls /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl ++ sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/' + specifier=textdistance==4.6.1 + '[' -z textdistance==4.6.1 ']' + TMPDIR=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir + /usr/bin/python3 -m pip install --root /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --prefix /usr --no-deps --disable-pip-version-check --progress-bar off --verbose --ignore-installed --no-warn-script-location --no-index --no-cache-dir --find-links /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir textdistance==4.6.1 Using pip 23.3.2 from /usr/lib/python3.12/site-packages/pip (python 3.12) Looking in links: /builddir/build/BUILD/textdistance-4.6.1/pyproject-wheeldir Processing ./pyproject-wheeldir/textdistance-4.6.1-py3-none-any.whl Installing collected packages: textdistance Successfully installed textdistance-4.6.1 + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin ']' + rm -f /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo + site_dirs=() + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + site_dirs+=("/usr/lib/python3.12/site-packages") + '[' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages '!=' /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages ']' + '[' -d /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages ']' + for site_dir in ${site_dirs[@]} + for distinfo in 
/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch$site_dir/*.dist-info + echo '%ghost /usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info' + sed -i s/pip/rpm/ /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/INSTALLER + PYTHONPATH=/usr/lib/rpm/redhat + /usr/bin/python3 -B /usr/lib/rpm/redhat/pyproject_preprocess_record.py --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --record /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD --output /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record + rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/RECORD' + rm -fv /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED removed '/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages/textdistance-4.6.1.dist-info/REQUESTED' ++ wc -l /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-ghost-distinfo ++ cut -f1 '-d ' + lines=1 + '[' 1 -ne 1 ']' + RPM_PERCENTAGES_COUNT=2 + /usr/bin/python3 /usr/lib/rpm/redhat/pyproject_save_files.py --output-files /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-files --output-modules /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-modules --buildroot /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch --sitelib /usr/lib/python3.12/site-packages --sitearch /usr/lib64/python3.12/site-packages --python-version 3.12 --pyproject-record /builddir/build/BUILD/python-textdistance-4.6.1-1.fc40.noarch-pyproject-record --prefix /usr -l textdistance + /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 4.6.1-1.fc40 --unique-debug-suffix -4.6.1-1.fc40.noarch --unique-debug-src-base python-textdistance-4.6.1-1.fc40.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/textdistance-4.6.1 find-debuginfo: starting Extracting debug info from 0 files Creating .debug symlinks for symlinks to ELF files find: ‘debug’: No such file or directory find-debuginfo: done + /usr/lib/rpm/check-buildroot + /usr/lib/rpm/redhat/brp-ldconfig + /usr/lib/rpm/brp-compress + /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip + /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip + /usr/lib/rpm/check-rpaths + /usr/lib/rpm/redhat/brp-mangle-shebangs + /usr/lib/rpm/brp-remove-la-files + env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8 Bytecompiling .py files below /builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12 using python3.12 + /usr/lib/rpm/redhat/brp-python-hardlink Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.x4MtTE + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 
-fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd textdistance-4.6.1 + k='not test_compare[Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein]' + k='not test_compare[Hamming] and not 
test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein]' + k='not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not 
test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]' + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + PATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin + PYTHONPATH=/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib64/python3.12/site-packages:/builddir/build/BUILDROOT/python-textdistance-4.6.1-1.fc40.noarch/usr/lib/python3.12/site-packages + PYTHONDONTWRITEBYTECODE=1 + PYTEST_ADDOPTS=' --ignore=/builddir/build/BUILD/textdistance-4.6.1/.pyproject-builddir' + PYTEST_XDIST_AUTO_NUM_WORKERS=8 + /usr/bin/pytest -v -k 'not test_compare[Hamming] and not test_compare[Levenshtein] and not test_list_of_numbers[Hamming] and not test_list_of_numbers[Levenshtein] and not test_qval[1-Hamming] and not test_qval[1-Levenshtein] and not test_qval[2-Hamming] and not test_qval[2-Levenshtein] and not test_qval[3-Hamming] and not test_qval[3-Levenshtein] and not test_qval[None-Hamming] and not test_qval[None-Levenshtein] and not test_compare[DamerauLevenshtein] and not test_list_of_numbers[DamerauLevenshtein] and not test_qval[1-DamerauLevenshtein] and not test_qval[2-DamerauLevenshtein] and not test_qval[3-DamerauLevenshtein] and not test_qval[None-DamerauLevenshtein]' -n auto ============================= test session starts ============================== platform linux -- Python 3.12.0, pytest-7.4.3, pluggy-1.3.0 -- /usr/bin/python3 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/builddir/build/BUILD/textdistance-4.6.1/.hypothesis/examples') rootdir: /builddir/build/BUILD/textdistance-4.6.1 configfile: setup.cfg plugins: hypothesis-6.82.0, xdist-3.5.0 created: 8/8 workers 8 workers [412 items] scheduling tests via LoadScheduling tests/test_common.py::test_normalization_by_one[alg12] tests/test_common.py::test_normalization_by_one[alg0] tests/test_common.py::test_normalization_same[alg0] tests/test_common.py::test_normalization_range[alg12] tests/test_common.py::test_normalization_monotonic[alg12] tests/test_common.py::test_normalization_range[alg0] tests/test_common.py::test_normalization_same[alg12] tests/test_common.py::test_normalization_monotonic[alg0] [gw5] [ 0%] FAILED tests/test_common.py::test_normalization_same[alg12] tests/test_common.py::test_normalization_same[alg13] [gw0] [ 0%] FAILED tests/test_common.py::test_normalization_range[alg0] tests/test_common.py::test_normalization_range[alg1] [gw7] [ 0%] PASSED tests/test_common.py::test_normalization_monotonic[alg12] tests/test_common.py::test_normalization_monotonic[alg13] [gw6] [ 0%] PASSED 
tests/test_common.py::test_normalization_monotonic[alg0] tests/test_common.py::test_normalization_monotonic[alg1] [gw0] [ 1%] FAILED tests/test_common.py::test_normalization_range[alg1] tests/test_common.py::test_normalization_range[alg2] [gw4] [ 1%] FAILED tests/test_common.py::test_normalization_same[alg0] tests/test_common.py::test_normalization_same[alg1] [gw5] [ 1%] PASSED tests/test_common.py::test_normalization_same[alg13] tests/test_common.py::test_normalization_same[alg14] [gw4] [ 1%] FAILED tests/test_common.py::test_normalization_same[alg1] tests/test_common.py::test_normalization_same[alg2] [gw1] [ 2%] FAILED tests/test_common.py::test_normalization_range[alg12] tests/test_common.py::test_normalization_range[alg13] [gw3] [ 2%] FAILED tests/test_common.py::test_normalization_by_one[alg12] tests/test_common.py::test_normalization_by_one[alg13] [gw2] [ 2%] FAILED tests/test_common.py::test_normalization_by_one[alg0] tests/test_common.py::test_normalization_by_one[alg1] [gw2] [ 2%] FAILED tests/test_common.py::test_normalization_by_one[alg1] tests/test_common.py::test_normalization_by_one[alg2] [gw0] [ 3%] FAILED tests/test_common.py::test_normalization_range[alg2] tests/test_common.py::test_normalization_range[alg3] [gw4] [ 3%] PASSED tests/test_common.py::test_normalization_same[alg2] tests/test_common.py::test_normalization_same[alg3] [gw6] [ 3%] PASSED tests/test_common.py::test_normalization_monotonic[alg1] tests/test_common.py::test_normalization_monotonic[alg2] [gw7] [ 3%] PASSED tests/test_common.py::test_normalization_monotonic[alg13] tests/test_common.py::test_normalization_monotonic[alg14] [gw2] [ 4%] FAILED tests/test_common.py::test_normalization_by_one[alg2] tests/test_common.py::test_normalization_by_one[alg3] [gw3] [ 4%] FAILED tests/test_common.py::test_normalization_by_one[alg13] tests/test_common.py::test_normalization_by_one[alg14] [gw5] [ 4%] FAILED tests/test_common.py::test_normalization_same[alg14] tests/test_common.py::test_normalization_same[alg15] [gw0] [ 4%] PASSED tests/test_common.py::test_normalization_range[alg3] tests/test_common.py::test_normalization_range[alg4] [gw4] [ 5%] PASSED tests/test_common.py::test_normalization_same[alg3] tests/test_common.py::test_normalization_same[alg4] [gw1] [ 5%] FAILED tests/test_common.py::test_normalization_range[alg13] tests/test_common.py::test_normalization_range[alg14] [gw5] [ 5%] PASSED tests/test_common.py::test_normalization_same[alg15] tests/test_common.py::test_normalization_same[alg16] [gw6] [ 5%] PASSED tests/test_common.py::test_normalization_monotonic[alg2] tests/test_common.py::test_normalization_monotonic[alg3] [gw7] [ 6%] PASSED tests/test_common.py::test_normalization_monotonic[alg14] tests/test_common.py::test_normalization_monotonic[alg15] [gw2] [ 6%] FAILED tests/test_common.py::test_normalization_by_one[alg3] tests/test_common.py::test_normalization_by_one[alg4] [gw3] [ 6%] PASSED tests/test_common.py::test_normalization_by_one[alg14] tests/test_common.py::test_normalization_by_one[alg15] [gw4] [ 6%] PASSED tests/test_common.py::test_normalization_same[alg4] tests/test_common.py::test_normalization_same[alg5] [gw1] [ 7%] PASSED tests/test_common.py::test_normalization_range[alg14] tests/test_common.py::test_normalization_range[alg15] [gw0] [ 7%] PASSED tests/test_common.py::test_normalization_range[alg4] tests/test_common.py::test_normalization_range[alg5] [gw7] [ 7%] PASSED tests/test_common.py::test_normalization_monotonic[alg15] tests/test_common.py::test_normalization_monotonic[alg16] 
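The 18-term -k expression assembled in the k= trace lines above deselects specific parametrized cases before pytest fans the remaining 412 items out to 8 xdist workers. A short sketch of the same accumulation (the id list is abbreviated here; the full set is in the trace):

    # rebuilding the -k filter from the k= lines above (abbreviated;
    # all 18 ids appear in the trace)
    deselect = [
        'test_compare[Hamming]',
        'test_compare[Levenshtein]',
        'test_list_of_numbers[Hamming]',
        'test_qval[None-DamerauLevenshtein]',
    ]
    k_expr = ' and '.join('not ' + case for case in deselect)
    print(k_expr)
    # passed to the runner as: pytest -v -k "<expression>" -n auto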
[gw2] [ 7%] PASSED tests/test_common.py::test_normalization_by_one[alg4] [gw6] [ 8%] PASSED tests/test_common.py::test_normalization_monotonic[alg3] tests/test_common.py::test_normalization_by_one[alg5] tests/test_common.py::test_normalization_monotonic[alg4] [gw1] [ 8%] FAILED tests/test_common.py::test_normalization_range[alg15] tests/test_common.py::test_normalization_range[alg16] [gw5] [ 8%] FAILED tests/test_common.py::test_normalization_same[alg16] tests/test_common.py::test_normalization_same[alg17] [gw4] [ 8%] FAILED tests/test_common.py::test_normalization_same[alg5] tests/test_common.py::test_normalization_same[alg6] [gw3] [ 8%] FAILED tests/test_common.py::test_normalization_by_one[alg15] tests/test_common.py::test_normalization_by_one[alg16] [gw0] [ 9%] FAILED tests/test_common.py::test_normalization_range[alg5] tests/test_common.py::test_normalization_range[alg6] [gw2] [ 9%] PASSED tests/test_common.py::test_normalization_by_one[alg5] tests/test_common.py::test_normalization_by_one[alg6] [gw1] [ 9%] FAILED tests/test_common.py::test_normalization_range[alg16] tests/test_common.py::test_normalization_range[alg17] [gw5] [ 9%] FAILED tests/test_common.py::test_normalization_same[alg17] [gw6] [ 10%] PASSED tests/test_common.py::test_normalization_monotonic[alg4] tests/test_common.py::test_normalization_same[alg18] tests/test_common.py::test_normalization_monotonic[alg5] [gw4] [ 10%] PASSED tests/test_common.py::test_normalization_same[alg6] tests/test_common.py::test_normalization_same[alg7] [gw5] [ 10%] FAILED tests/test_common.py::test_normalization_same[alg18] tests/test_common.py::test_normalization_same[alg19] [gw2] [ 10%] FAILED tests/test_common.py::test_normalization_by_one[alg6] tests/test_common.py::test_normalization_by_one[alg7] [gw1] [ 11%] FAILED tests/test_common.py::test_normalization_range[alg17] tests/test_common.py::test_normalization_range[alg18] [gw3] [ 11%] FAILED tests/test_common.py::test_normalization_by_one[alg16] tests/test_common.py::test_normalization_by_one[alg17] [gw0] [ 11%] FAILED tests/test_common.py::test_normalization_range[alg6] tests/test_common.py::test_normalization_range[alg7] [gw7] [ 11%] PASSED tests/test_common.py::test_normalization_monotonic[alg16] tests/test_common.py::test_normalization_monotonic[alg17] [gw3] [ 12%] FAILED tests/test_common.py::test_normalization_by_one[alg17] tests/test_common.py::test_normalization_by_one[alg18] [gw4] [ 12%] FAILED tests/test_common.py::test_normalization_same[alg7] tests/test_common.py::test_normalization_same[alg8] [gw6] [ 12%] PASSED tests/test_common.py::test_normalization_monotonic[alg5] tests/test_common.py::test_normalization_monotonic[alg6] [gw2] [ 12%] FAILED tests/test_common.py::test_normalization_by_one[alg7] tests/test_common.py::test_normalization_by_one[alg8] [gw4] [ 13%] FAILED tests/test_common.py::test_normalization_same[alg8] tests/test_common.py::test_normalization_same[alg9] [gw1] [ 13%] PASSED tests/test_common.py::test_normalization_range[alg18] tests/test_common.py::test_normalization_range[alg19] [gw0] [ 13%] FAILED tests/test_common.py::test_normalization_range[alg7] tests/test_common.py::test_normalization_range[alg8] [gw5] [ 13%] FAILED tests/test_common.py::test_normalization_same[alg19] tests/test_common.py::test_normalization_same[alg20] [gw7] [ 14%] PASSED tests/test_common.py::test_normalization_monotonic[alg17] [gw4] [ 14%] FAILED tests/test_common.py::test_normalization_same[alg9] tests/test_common.py::test_normalization_monotonic[alg18] 
tests/test_common.py::test_normalization_same[alg10] [gw4] [ 14%] FAILED tests/test_common.py::test_normalization_same[alg10] tests/test_common.py::test_normalization_same[alg11] [gw3] [ 14%] PASSED tests/test_common.py::test_normalization_by_one[alg18] tests/test_common.py::test_normalization_by_one[alg19] [gw6] [ 15%] PASSED tests/test_common.py::test_normalization_monotonic[alg6] tests/test_common.py::test_normalization_monotonic[alg7] [gw5] [ 15%] PASSED tests/test_common.py::test_normalization_same[alg20] tests/test_common.py::test_normalization_same[alg21] [gw4] [ 15%] PASSED tests/test_common.py::test_normalization_same[alg11] [gw2] [ 15%] FAILED tests/test_common.py::test_normalization_by_one[alg8] tests/test_common.py::test_no_common_chars[alg0] tests/test_common.py::test_normalization_by_one[alg9] [gw4] [ 16%] PASSED tests/test_common.py::test_no_common_chars[alg0] tests/test_common.py::test_no_common_chars[alg1] [gw4] [ 16%] PASSED tests/test_common.py::test_no_common_chars[alg1] tests/test_common.py::test_no_common_chars[alg2] [gw4] [ 16%] PASSED tests/test_common.py::test_no_common_chars[alg2] tests/test_common.py::test_no_common_chars[alg3] [gw4] [ 16%] PASSED tests/test_common.py::test_no_common_chars[alg3] tests/test_common.py::test_no_common_chars[alg4] [gw4] [ 16%] PASSED tests/test_common.py::test_no_common_chars[alg4] tests/test_common.py::test_no_common_chars[alg5] [gw4] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg5] tests/test_common.py::test_no_common_chars[alg6] [gw4] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg6] tests/test_common.py::test_no_common_chars[alg7] [gw4] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg7] tests/test_common.py::test_no_common_chars[alg8] [gw4] [ 17%] PASSED tests/test_common.py::test_no_common_chars[alg8] [gw0] [ 18%] FAILED tests/test_common.py::test_normalization_range[alg8] tests/test_common.py::test_no_common_chars[alg9] [gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg9] tests/test_common.py::test_normalization_range[alg9] tests/test_common.py::test_no_common_chars[alg10] [gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg10] tests/test_common.py::test_no_common_chars[alg11] [gw4] [ 18%] PASSED tests/test_common.py::test_no_common_chars[alg11] tests/test_common.py::test_no_common_chars[alg12] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg12] tests/test_common.py::test_no_common_chars[alg13] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg13] tests/test_common.py::test_no_common_chars[alg14] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg14] tests/test_common.py::test_no_common_chars[alg15] [gw4] [ 19%] PASSED tests/test_common.py::test_no_common_chars[alg15] tests/test_common.py::test_no_common_chars[alg16] [gw1] [ 20%] FAILED tests/test_common.py::test_normalization_range[alg19] tests/test_common.py::test_normalization_range[alg20] [gw7] [ 20%] PASSED tests/test_common.py::test_normalization_monotonic[alg18] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg16] tests/test_common.py::test_normalization_monotonic[alg19] tests/test_common.py::test_no_common_chars[alg17] [gw4] [ 20%] PASSED tests/test_common.py::test_no_common_chars[alg17] tests/test_common.py::test_no_common_chars[alg18] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg18] tests/test_common.py::test_no_common_chars[alg19] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg19] 
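The interleaved test_normalization_*, test_no_common_chars and test_empty cases all exercise the same base-class contract: every algorithm exposes distance/similarity plus normalized variants bounded by [0, 1]. A hedged sketch of those invariants, assuming the documented textdistance helper methods:

    import textdistance

    # normalized distance and similarity are bounded and complementary
    nd = textdistance.levenshtein.normalized_distance('test', 'text')
    ns = textdistance.levenshtein.normalized_similarity('test', 'text')
    assert 0.0 <= nd <= 1.0 and abs(nd + ns - 1.0) < 1e-9  # 0.25 and 0.75

    # per test_no_common_chars and test_empty: disjoint alphabets give
    # zero similarity, empty inputs give zero distance
    assert textdistance.jaccard.similarity('abc', 'xyz') == 0
    assert textdistance.levenshtein.distance('', '') == 0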
tests/test_common.py::test_no_common_chars[alg20] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg20] tests/test_common.py::test_no_common_chars[alg21] [gw4] [ 21%] PASSED tests/test_common.py::test_no_common_chars[alg21] tests/test_common.py::test_no_common_chars[alg22] [gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg22] tests/test_common.py::test_no_common_chars[alg23] [gw0] [ 22%] FAILED tests/test_common.py::test_normalization_range[alg9] tests/test_common.py::test_normalization_range[alg10] [gw4] [ 22%] PASSED tests/test_common.py::test_no_common_chars[alg23] tests/test_common.py::test_empty[alg0] [gw4] [ 22%] PASSED tests/test_common.py::test_empty[alg0] tests/test_common.py::test_empty[alg1] [gw4] [ 23%] PASSED tests/test_common.py::test_empty[alg1] tests/test_common.py::test_empty[alg2] [gw4] [ 23%] PASSED tests/test_common.py::test_empty[alg2] tests/test_common.py::test_empty[alg3] [gw4] [ 23%] PASSED tests/test_common.py::test_empty[alg3] tests/test_common.py::test_empty[alg4] [gw4] [ 23%] PASSED tests/test_common.py::test_empty[alg4] tests/test_common.py::test_empty[alg5] [gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg5] tests/test_common.py::test_empty[alg6] [gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg6] tests/test_common.py::test_empty[alg7] [gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg7] tests/test_common.py::test_empty[alg8] [gw4] [ 24%] PASSED tests/test_common.py::test_empty[alg8] tests/test_common.py::test_empty[alg9] [gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg9] tests/test_common.py::test_empty[alg10] [gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg10] tests/test_common.py::test_empty[alg11] [gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg11] tests/test_common.py::test_empty[alg12] [gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg12] tests/test_common.py::test_empty[alg13] [gw4] [ 25%] PASSED tests/test_common.py::test_empty[alg13] tests/test_common.py::test_empty[alg14] [gw4] [ 26%] PASSED tests/test_common.py::test_empty[alg14] tests/test_common.py::test_empty[alg15] [gw4] [ 26%] PASSED tests/test_common.py::test_empty[alg15] [gw5] [ 26%] FAILED tests/test_common.py::test_normalization_same[alg21] tests/test_common.py::test_empty[alg16] [gw4] [ 26%] PASSED tests/test_common.py::test_empty[alg16] tests/test_common.py::test_empty[alg17] [gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg17] tests/test_common.py::test_empty[alg18] [gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg18] tests/test_common.py::test_empty[alg19] [gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg19] tests/test_common.py::test_empty[alg20] [gw4] [ 27%] PASSED tests/test_common.py::test_empty[alg20] tests/test_common.py::test_empty[alg21] tests/test_common.py::test_normalization_same[alg22] [gw4] [ 28%] PASSED tests/test_common.py::test_empty[alg21] tests/test_common.py::test_empty[alg22] [gw4] [ 28%] PASSED tests/test_common.py::test_empty[alg22] tests/test_common.py::test_empty[alg23] [gw4] [ 28%] PASSED tests/test_common.py::test_empty[alg23] tests/test_common.py::test_unequal_distance[alg0] [gw4] [ 28%] PASSED tests/test_common.py::test_unequal_distance[alg0] tests/test_common.py::test_unequal_distance[alg1] [gw4] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg1] tests/test_common.py::test_unequal_distance[alg2] [gw4] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg2] tests/test_common.py::test_unequal_distance[alg3] [gw4] [ 29%] PASSED 
tests/test_common.py::test_unequal_distance[alg3] tests/test_common.py::test_unequal_distance[alg4] [gw4] [ 29%] PASSED tests/test_common.py::test_unequal_distance[alg4] [gw6] [ 30%] PASSED tests/test_common.py::test_normalization_monotonic[alg7] tests/test_common.py::test_unequal_distance[alg5] tests/test_common.py::test_normalization_monotonic[alg8] [gw4] [ 30%] PASSED tests/test_common.py::test_unequal_distance[alg5] tests/test_common.py::test_unequal_distance[alg6] [gw4] [ 30%] PASSED tests/test_common.py::test_unequal_distance[alg6] tests/test_common.py::test_unequal_distance[alg7] [gw4] [ 30%] PASSED tests/test_common.py::test_unequal_distance[alg7] tests/test_common.py::test_unequal_distance[alg8] [gw4] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg8] tests/test_common.py::test_unequal_distance[alg9] [gw3] [ 31%] FAILED tests/test_common.py::test_normalization_by_one[alg19] [gw4] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg9] tests/test_common.py::test_normalization_by_one[alg20] tests/test_common.py::test_unequal_distance[alg10] [gw4] [ 31%] PASSED tests/test_common.py::test_unequal_distance[alg10] tests/test_common.py::test_unequal_distance[alg11] [gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg11] tests/test_common.py::test_unequal_distance[alg12] [gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg12] tests/test_common.py::test_unequal_distance[alg13] [gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg13] tests/test_common.py::test_unequal_distance[alg14] [gw4] [ 32%] PASSED tests/test_common.py::test_unequal_distance[alg14] tests/test_common.py::test_unequal_distance[alg15] [gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg15] tests/test_common.py::test_unequal_distance[alg16] [gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg16] tests/test_common.py::test_unequal_distance[alg17] [gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg17] tests/test_common.py::test_unequal_distance[alg18] [gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg18] tests/test_common.py::test_unequal_distance[alg19] [gw4] [ 33%] PASSED tests/test_common.py::test_unequal_distance[alg19] tests/test_common.py::test_unequal_distance[alg20] [gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg20] tests/test_common.py::test_unequal_distance[alg21] [gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg21] tests/test_common.py::test_unequal_distance[alg22] [gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg22] tests/test_common.py::test_unequal_distance[alg23] [gw4] [ 34%] PASSED tests/test_common.py::test_unequal_distance[alg23] tests/test_external.py::test_compare[Jaro] [gw1] [ 35%] PASSED tests/test_common.py::test_normalization_range[alg20] tests/test_common.py::test_normalization_range[alg21] [gw2] [ 35%] FAILED tests/test_common.py::test_normalization_by_one[alg9] [gw5] [ 35%] PASSED tests/test_common.py::test_normalization_same[alg22] tests/test_common.py::test_normalization_same[alg23] tests/test_common.py::test_normalization_by_one[alg10] [gw7] [ 35%] PASSED tests/test_common.py::test_normalization_monotonic[alg19] tests/test_common.py::test_normalization_monotonic[alg20] [gw0] [ 36%] FAILED tests/test_common.py::test_normalization_range[alg10] tests/test_common.py::test_normalization_range[alg11] [gw6] [ 36%] PASSED tests/test_common.py::test_normalization_monotonic[alg8] 
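tests/test_external.py, which starts interleaving here, checks textdistance's pure-Python results against the external implementations whose versions were pinned during dependency generation (jellyfish 0.9.0, rapidfuzz 2.13.7). A sketch of that cross-check for Jaro; jellyfish's jaro_similarity name is an assumption based on its 0.9 API, not something this log prints:

    import jellyfish
    import textdistance

    # compare the pure-Python Jaro against jellyfish's implementation
    for a, b in [('frog', 'fog'), ('MARTHA', 'MARHTA')]:
        assert abs(textdistance.jaro(a, b) - jellyfish.jaro_similarity(a, b)) < 1e-9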
tests/test_common.py::test_normalization_monotonic[alg9] [gw3] [ 36%] PASSED tests/test_common.py::test_normalization_by_one[alg20] tests/test_common.py::test_normalization_by_one[alg21] [gw4] [ 36%] PASSED tests/test_external.py::test_compare[Jaro] tests/test_external.py::test_compare[JaroWinkler] [gw1] [ 37%] FAILED tests/test_common.py::test_normalization_range[alg21] tests/test_common.py::test_normalization_range[alg22] [gw5] [ 37%] PASSED tests/test_common.py::test_normalization_same[alg23] tests/test_external.py::test_qval[2-JaroWinkler] [gw2] [ 37%] FAILED tests/test_common.py::test_normalization_by_one[alg10] tests/test_common.py::test_normalization_by_one[alg11] [gw3] [ 37%] FAILED tests/test_common.py::test_normalization_by_one[alg21] tests/test_common.py::test_normalization_by_one[alg22] [gw7] [ 38%] PASSED tests/test_common.py::test_normalization_monotonic[alg20] tests/test_common.py::test_normalization_monotonic[alg21] [gw6] [ 38%] PASSED tests/test_common.py::test_normalization_monotonic[alg9] tests/test_common.py::test_normalization_monotonic[alg10] [gw1] [ 38%] FAILED tests/test_common.py::test_normalization_range[alg22] tests/test_common.py::test_normalization_range[alg23] [gw4] [ 38%] PASSED tests/test_external.py::test_compare[JaroWinkler] tests/test_external.py::test_qval[None-Jaro] [gw5] [ 39%] FAILED tests/test_external.py::test_qval[2-JaroWinkler] tests/test_external.py::test_qval[3-Jaro] [gw0] [ 39%] FAILED tests/test_common.py::test_normalization_range[alg11] tests/test_compression/test_common.py::test_monotonicity[alg0] [gw0] [ 39%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg0] tests/test_compression/test_common.py::test_monotonicity[alg1] [gw0] [ 39%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg1] tests/test_compression/test_common.py::test_monotonicity[alg2] [gw0] [ 40%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg2] tests/test_compression/test_common.py::test_monotonicity[alg3] [gw0] [ 40%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg3] tests/test_compression/test_common.py::test_monotonicity[alg4] [gw0] [ 40%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg4] tests/test_compression/test_common.py::test_monotonicity[alg5] [gw0] [ 40%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg5] tests/test_compression/test_common.py::test_monotonicity[alg6] [gw0] [ 41%] PASSED tests/test_compression/test_common.py::test_monotonicity[alg6] tests/test_compression/test_common.py::test_simmetry[alg0] [gw3] [ 41%] FAILED tests/test_common.py::test_normalization_by_one[alg22] tests/test_common.py::test_normalization_by_one[alg23] [gw2] [ 41%] FAILED tests/test_common.py::test_normalization_by_one[alg11] tests/test_compression/test_common.py::test_simmetry[alg6] [gw6] [ 41%] PASSED tests/test_common.py::test_normalization_monotonic[alg10] tests/test_common.py::test_normalization_monotonic[alg11] [gw7] [ 41%] PASSED tests/test_common.py::test_normalization_monotonic[alg21] tests/test_common.py::test_normalization_monotonic[alg22] [gw2] [ 42%] FAILED tests/test_compression/test_common.py::test_simmetry[alg6] tests/test_compression/test_common.py::test_is_normalized[alg0] [gw0] [ 42%] FAILED tests/test_compression/test_common.py::test_simmetry[alg0] tests/test_compression/test_common.py::test_simmetry[alg1] [gw3] [ 42%] FAILED tests/test_common.py::test_normalization_by_one[alg23] 
tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503] [gw4] [ 42%] FAILED tests/test_external.py::test_qval[None-Jaro] tests/test_external.py::test_qval[None-JaroWinkler] [gw3] [ 43%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-test-0.41421356237309503] tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1] [gw3] [ 43%] PASSED tests/test_compression/test_sqrt_ncd.py::test_similarity[test-nani-1] tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor [gw4] [ 43%] FAILED tests/test_external.py::test_qval[None-JaroWinkler] tests/test_external.py::test_qval[1-Jaro] [gw4] [ 43%] FAILED tests/test_external.py::test_qval[1-Jaro] tests/test_external.py::test_qval[1-JaroWinkler] [gw5] [ 44%] FAILED tests/test_external.py::test_qval[3-Jaro] tests/test_external.py::test_qval[3-JaroWinkler] [gw5] [ 44%] FAILED tests/test_external.py::test_qval[3-JaroWinkler] tests/test_external.py::test_list_of_numbers[Jaro] [gw1] [ 44%] FAILED tests/test_common.py::test_normalization_range[alg23] tests/test_compression/test_common.py::test_normalized_by_one[alg4] [gw6] [ 44%] PASSED tests/test_common.py::test_normalization_monotonic[alg11] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2] [gw6] [ 45%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-testit-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1] [gw6] [ 45%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tesst-1] [gw7] [ 45%] PASSED tests/test_common.py::test_normalization_monotonic[alg22] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1] tests/test_common.py::test_normalization_monotonic[alg23] [gw6] [ 45%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tet-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[cat-hat-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[Niall-Neil-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7] [gw6] [ 46%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[aluminum-Catalan-7] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2] [gw0] [ 46%] FAILED tests/test_compression/test_common.py::test_simmetry[alg1] tests/test_compression/test_common.py::test_simmetry[alg2] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ATCG-TAGC-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1] [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ba-1] [gw3] [ 47%] FAILED tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3] tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor [gw6] [ 47%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-cde-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-ac-1] [gw2] [ 48%] FAILED 
tests/test_compression/test_common.py::test_is_normalized[alg0] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[cat-hat-1] tests/test_compression/test_common.py::test_is_normalized[alg1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3] [gw6] [ 48%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[Niall-Neil-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[aluminum-Catalan-7] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ATCG-TAGC-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ba-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3] [gw6] [ 49%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-cde-3] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-ac-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bc-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[ab-bca-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3] [gw6] [ 50%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[abcd-bdac-3] tests/test_edit/test_editex.py::test_distance[--0] [gw6] [ 50%] PASSED tests/test_edit/test_editex.py::test_distance[--0] tests/test_edit/test_editex.py::test_distance[nelson--12] [gw6] [ 51%] PASSED tests/test_edit/test_editex.py::test_distance[nelson--12] tests/test_edit/test_editex.py::test_distance[-neilsen-14] [gw6] [ 51%] PASSED tests/test_edit/test_editex.py::test_distance[-neilsen-14] tests/test_edit/test_editex.py::test_distance[ab-a-2] [gw6] [ 51%] PASSED tests/test_edit/test_editex.py::test_distance[ab-a-2] tests/test_edit/test_editex.py::test_distance[ab-c-4] [gw6] [ 51%] PASSED tests/test_edit/test_editex.py::test_distance[ab-c-4] tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[ALIE-ALI-1] tests/test_edit/test_editex.py::test_distance[-MARTHA-12] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance[-MARTHA-12] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params0-12] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24] [gw6] [ 52%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params1-24] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3] [gw1] [ 53%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg4] tests/test_compression/test_common.py::test_normalized_by_one[alg5] [gw6] [ 
53%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params2-3] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4] [gw6] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params3-4] tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5] [gw6] [ 53%] PASSED tests/test_edit/test_editex.py::test_distance_with_params[MARTHA-MARHTA-params4-5] tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0] [gw6] [ 54%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident[GATTACA-GCATGCU-0] tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0] [gw6] [ 54%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[GATTACA-GCATGCU-0] [gw3] [ 54%] FAILED tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5] [gw6] [ 54%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-TGACGSTGC-1.5] tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1] [gw6] [ 55%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_05[AGACTAGTTAC-CGAGACGT-1] tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] [gw1] [ 55%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg5] [gw6] [ 55%] PASSED tests/test_edit/test_gotoh.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] tests/test_edit/test_hamming.py::test_distance[test-text-1] [gw6] [ 55%] PASSED tests/test_edit/test_hamming.py::test_distance[test-text-1] tests/test_compression/test_common.py::test_normalized_by_one[alg6] tests/test_edit/test_hamming.py::test_distance[test-tset-2] [gw6] [ 56%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tset-2] tests/test_edit/test_hamming.py::test_distance[test-qwe-4] [gw6] [ 56%] PASSED tests/test_edit/test_hamming.py::test_distance[test-qwe-4] tests/test_edit/test_hamming.py::test_distance[test-testit-2] [gw6] [ 56%] PASSED tests/test_edit/test_hamming.py::test_distance[test-testit-2] tests/test_edit/test_hamming.py::test_distance[test-tesst-2] [gw6] [ 56%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tesst-2] tests/test_edit/test_hamming.py::test_distance[test-tet-2] [gw6] [ 57%] PASSED tests/test_edit/test_hamming.py::test_distance[test-tet-2] tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334] [gw6] [ 57%] PASSED tests/test_edit/test_jaro.py::test_distance[hello-haloa-0.7333333333333334] tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0] [gw6] [ 57%] PASSED tests/test_edit/test_jaro.py::test_distance[fly-ant-0.0] tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666] [gw5] [ 57%] PASSED tests/test_external.py::test_list_of_numbers[Jaro] [gw6] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[frog-fog-0.9166666666666666] tests/test_external.py::test_list_of_numbers[JaroWinkler] tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334] [gw6] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[ATCG-TAGC-0.8333333333333334] tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444] [gw6] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[MARTHA-MARHTA-0.944444444] 
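The parametrized expectations streaming past here can be replayed directly; every value below is copied from a test id visible in this run (Jaro is similarity-based, so its ids encode similarities rather than distances):

    import textdistance

    # edit distances copied from the test ids in this run
    assert textdistance.hamming.distance('test', 'text') == 1
    assert textdistance.hamming.distance('test', 'tset') == 2
    assert textdistance.levenshtein.distance('test', 'tesst') == 1

    # Jaro similarities, same source
    assert abs(textdistance.jaro.similarity('frog', 'fog') - 0.9166666666666666) < 1e-9
    assert abs(textdistance.jaro.similarity('MARTHA', 'MARHTA') - 0.944444444) < 1e-6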
tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222] [gw6] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[DWAYNE-DUANE-0.822222222] tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666] [gw6] [ 58%] PASSED tests/test_edit/test_jaro.py::test_distance[DIXON-DICKSONX-0.7666666666666666] tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683] [gw6] [ 59%] PASSED tests/test_edit/test_jaro.py::test_distance[Sint-Pietersplein 6, 9000 Gent-Test 10, 1010 Brussel-0.5182539682539683] tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665] [gw6] [ 59%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[elephant-hippo-0.44166666666666665] tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0] [gw6] [ 59%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[fly-ant-0.0] tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925] [gw6] [ 59%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[frog-fog-0.925] tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111] [gw6] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[MARTHA-MARHTA-0.9611111111111111] tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84] [gw6] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DWAYNE-DUANE-0.84] tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332] [gw6] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[DIXON-DICKSONX-0.8133333333333332] tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272] [gw6] [ 60%] PASSED tests/test_edit/test_jaro_winkler.py::test_distance[duck donald-duck daisy-0.867272727272] tests/test_edit/test_levenshtein.py::test_distance[test-text-1] [gw6] [ 61%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-text-1] tests/test_edit/test_levenshtein.py::test_distance[test-tset-2] [gw6] [ 61%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tset-2] tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4] [gw6] [ 61%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-qwe-4] tests/test_edit/test_levenshtein.py::test_distance[test-testit-2] [gw6] [ 61%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-testit-2] tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1] [gw6] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tesst-1] tests/test_edit/test_levenshtein.py::test_distance[test-tet-1] [gw6] [ 62%] PASSED tests/test_edit/test_levenshtein.py::test_distance[test-tet-1] tests/test_edit/test_matrix.py::test_distance[--1] [gw6] [ 62%] PASSED tests/test_edit/test_matrix.py::test_distance[--1] tests/test_edit/test_matrix.py::test_distance[-a-0] [gw6] [ 62%] PASSED tests/test_edit/test_matrix.py::test_distance[-a-0] tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1] [gw6] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[abcd-abcd-1] tests/test_edit/test_matrix.py::test_distance[A-C--3] [gw6] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[A-C--3] tests/test_edit/test_matrix.py::test_distance[G-G-7] [gw6] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[G-G-7] tests/test_edit/test_matrix.py::test_distance[A-A-10] [gw6] [ 63%] PASSED tests/test_edit/test_matrix.py::test_distance[A-A-10] tests/test_edit/test_matrix.py::test_distance[T-A--4] 
[gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[T-A--4] tests/test_edit/test_matrix.py::test_distance[T-C-0] [gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[T-C-0] tests/test_edit/test_matrix.py::test_distance[A-G--1] [gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[A-G--1] tests/test_edit/test_matrix.py::test_distance[C-T-0] [gw6] [ 64%] PASSED tests/test_edit/test_matrix.py::test_distance[C-T-0] tests/test_edit/test_mlipns.py::test_distance[--1] [gw6] [ 65%] PASSED tests/test_edit/test_mlipns.py::test_distance[--1] tests/test_edit/test_mlipns.py::test_distance[a--0] [gw6] [ 65%] PASSED tests/test_edit/test_mlipns.py::test_distance[a--0] tests/test_edit/test_mlipns.py::test_distance[-a-0] [gw6] [ 65%] PASSED tests/test_edit/test_mlipns.py::test_distance[-a-0] tests/test_edit/test_mlipns.py::test_distance[a-a-1] [gw6] [ 65%] PASSED tests/test_edit/test_mlipns.py::test_distance[a-a-1] tests/test_edit/test_mlipns.py::test_distance[ab-a-1] [gw6] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[ab-a-1] tests/test_edit/test_mlipns.py::test_distance[abc-abc-1] [gw6] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abc-1] tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1] [gw6] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[abc-abcde-1] tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1] [gw6] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdeg-1] tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0] [gw6] [ 66%] PASSED tests/test_edit/test_mlipns.py::test_distance[abcg-abcdefg-0] tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1] [gw6] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[Tomato-Tamato-1] tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1] [gw6] [ 67%] PASSED tests/test_edit/test_mlipns.py::test_distance[ato-Tam-1] tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16] [gw4] [ 67%] FAILED tests/test_external.py::test_qval[1-JaroWinkler] [gw6] [ 67%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-16] tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0] tests/test_external.py::test_qval[2-Jaro] [gw6] [ 68%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident[GATTACA-GCATGCU-0] tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5] [gw6] [ 68%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC--5] tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0] [gw6] [ 68%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[CGATATCAG-TGACGSTGC-0] tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1] [gw6] [ 68%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC-1] [gw7] [ 69%] PASSED tests/test_common.py::test_normalization_monotonic[alg23] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2] tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0] [gw7] [ 69%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bc-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3] [gw7] [ 69%] PASSED 
tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[ab-bca-3] [gw6] [ 69%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT-0] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4] [gw3] [ 70%] FAILED tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111] tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor [gw6] [ 70%] PASSED tests/test_edit/test_strcmp95.py::test_distance[MARTHA-MARHTA-0.9611111111111111] [gw7] [ 70%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[abcd-bdac-4] tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873] [gw6] [ 70%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DWAYNE-DUANE-0.873] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1] [gw7] [ 71%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-text-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1] [gw7] [ 71%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tset-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4] [gw7] [ 71%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-qwy-4] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2] [gw7] [ 71%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-testit-2] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1] [gw7] [ 72%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tesst-1] tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333] [gw0] [ 72%] FAILED tests/test_compression/test_common.py::test_simmetry[alg2] tests/test_compression/test_common.py::test_simmetry[alg3] tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1] [gw6] [ 72%] PASSED tests/test_edit/test_strcmp95.py::test_distance[DIXON-DICKSONX-0.839333333] [gw7] [ 72%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_unrestricted[test-tet-1] tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666] tests/test_phonetic/test_editex.py::test_distance[-neilsen-14] [gw6] [ 73%] PASSED tests/test_edit/test_strcmp95.py::test_distance[TEST-TEXT-0.9066666666666666] tests/test_phonetic/test_editex.py::test_distance[--0] [gw7] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[-neilsen-14] tests/test_phonetic/test_editex.py::test_distance[ab-a-2] [gw6] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[--0] [gw7] [ 73%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-a-2] [gw0] [ 74%] FAILED tests/test_compression/test_common.py::test_simmetry[alg3] tests/test_phonetic/test_editex.py::test_distance[ab-c-4] tests/test_compression/test_common.py::test_simmetry[alg4] [gw7] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[ab-c-4] tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2] tests/test_phonetic/test_editex.py::test_distance[nelson--12] [gw7] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson-neilsen-2] [gw6] [ 74%] PASSED tests/test_phonetic/test_editex.py::test_distance[nelson--12] 
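The restricted and unrestricted Damerau-Levenshtein cases in this stream split exactly where theory says they should: unrestricted[ab-bca-2] allows editing a substring after transposing it, while restricted[ab-bca-3] (optimal string alignment) does not. A hedged sketch of that distinction, assuming the restricted flag that textdistance 4.6 adds to DamerauLevenshtein:

    from textdistance import DamerauLevenshtein

    # Per the test IDs above: transpose 'ab' -> 'ba', then insert 'c' (2 edits)
    # is legal only for the unrestricted variant; optimal string alignment
    # may not touch a transposed pair again, so it needs 3 edits.
    assert DamerauLevenshtein(restricted=False)('ab', 'bca') == 2
    assert DamerauLevenshtein(restricted=True)('ab', 'bca') == 3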
tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2] tests/test_phonetic/test_editex.py::test_distance[neal-niall-1] [gw7] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[neilsen-nelson-2] tests/test_phonetic/test_editex.py::test_distance[niall-neal-1] [gw6] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-niall-1] tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2] [gw7] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-neal-1] [gw6] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[niall-nihal-2] tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2] tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3] [gw7] [ 75%] PASSED tests/test_phonetic/test_editex.py::test_distance[nihal-niall-2] [gw6] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[nihl-neal-3] tests/test_phonetic/test_editex.py::test_distance[cat-hat-2] tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3] [gw6] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[cat-hat-2] tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6] [gw7] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[neal-nihl-3] [gw6] [ 76%] PASSED tests/test_phonetic/test_editex.py::test_distance[ATCG-TAGC-6] tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2] [gw7] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[Niall-Neil-2] tests/test_phonetic/test_editex.py::test_local[--0] tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12] [gw6] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_local[--0] [gw7] [ 77%] PASSED tests/test_phonetic/test_editex.py::test_distance[aluminum-Catalan-12] [gw4] [ 77%] FAILED tests/test_external.py::test_qval[2-Jaro] tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7] tests/test_phonetic/test_editex.py::test_local[ab-a-2] [gw7] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-a-2] [gw4] [ 78%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-TGACGSTGC--7] tests/test_phonetic/test_editex.py::test_local[nelson--12] [gw6] [ 78%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson--12] tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] [gw4] [ 78%] PASSED tests/test_edit/test_needleman_wunsch.py::test_distance_ident_with_gap_5[AGACTAGTTAC-CGAGACGT--15] [gw2] [ 79%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg1] tests/test_phonetic/test_editex.py::test_local[ab-c-2] tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26] [gw4] [ 79%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_matrix[AGACTAGTTAC-CGAGACGT-26] tests/test_compression/test_common.py::test_is_normalized[alg2] [gw7] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[ab-c-2] tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2] [gw7] [ 79%] PASSED tests/test_phonetic/test_editex.py::test_local[nelson-neilsen-2] tests/test_phonetic/test_editex.py::test_local[-neilsen-14] tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0] [gw4] [ 80%] PASSED tests/test_edit/test_smith_waterman.py::test_distance_ident[GATTACA-GCATGCU-0] [gw6] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[-neilsen-14] tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2] 
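Editex is an edit distance over phonetic letter groups, so like-sounding substitutions cost less than arbitrary ones; the empty-string cases above cost roughly 2 per deleted letter ('nelson' to '' is 12, '' to 'neilsen' is 14), while the phonetically close pair 'nelson'/'neilsen' costs only 2 in total. What two of the parametrizations assert, assuming the textdistance API:

    import textdistance

    # Values taken from the test IDs above.
    assert textdistance.editex('cat', 'hat') == 2
    assert textdistance.editex('nelson', 'neilsen') == 2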
tests/test_phonetic/test_editex.py::test_local[niall-neal-1] tests/test_phonetic/test_editex.py::test_local[neal-niall-1] [gw4] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-niall-1] [gw7] [ 80%] PASSED tests/test_phonetic/test_editex.py::test_local[neilsen-nelson-2] [gw6] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-neal-1] tests/test_phonetic/test_editex.py::test_local[neal-nihl-3] tests/test_phonetic/test_editex.py::test_local[nihal-niall-2] tests/test_phonetic/test_editex.py::test_local[niall-nihal-2] [gw4] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[neal-nihl-3] [gw7] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[niall-nihal-2] [gw6] [ 81%] PASSED tests/test_phonetic/test_editex.py::test_local[nihal-niall-2] tests/test_phonetic/test_editex.py::test_local[nihl-neal-3] tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd] tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-] [gw7] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[abcd-abcd-abcd] [gw6] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[ab-cd-] tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest] [gw4] [ 82%] PASSED tests/test_phonetic/test_editex.py::test_local[nihl-neal-3] tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION] [gw7] [ 82%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[thisisatest-testing123testing-tsitest] [gw6] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[DIXON-DICKSONX-DION] tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia] tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet] [gw7] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[random exponential-layer activation-ratia] [gw4] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[test-text-tet] tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-] tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-] [gw7] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs0-] tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa] tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet] [gw4] [ 83%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa] [gw6] [ 84%] PASSED tests/test_sequence/test_lcsseq.py::test_distance[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-] tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab] [gw1] [ 84%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6] tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a] tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1] [gw4] [ 84%] PASSED 
tests/test_sequence/test_lcsstr.py::test_distance[ab-abcd-ab] [gw6] [ 84%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs1-a] [gw1] [ 85%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-test-1] tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0] tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc] [gw1] [ 85%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[aaa-bbb-0] [gw4] [ 85%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-bc-bc] tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc] tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6] [gw1] [ 85%] PASSED tests/test_compression/test_entropy_ncd.py::test_similarity[test-nani-0.6] [gw6] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[bc-abcd-bc] tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1] tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0] [gw4] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd0] tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-] [gw6] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-cd-cd1] [gw4] [ 86%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ef-] [gw7] [ 87%] PASSED tests/test_sequence/test_lcsseq.py::test_distance_multiseq[seqs2-tet] tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST] tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab] tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-] [gw4] [ 87%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST-TEST] 
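The LCS test IDs carry strings rather than numbers ('test'-'text'-'tet', 'abcd'-'cd'-'cd'), which suggests that calling these algorithms returns the common sequence itself: lcsseq the longest not-necessarily-contiguous subsequence, lcsstr the longest contiguous substring. A sketch under that assumption, with expected strings from the IDs above:

    import textdistance

    # lcsseq keeps 't', 'e', 't' from 'test'/'text'; lcsstr needs contiguity.
    assert textdistance.lcsseq('test', 'text') == 'tet'
    assert textdistance.lcsstr('abcd', 'cd') == 'cd'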
tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST] [gw6] [ 87%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[ef-abcd-] [gw7] [ 87%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[abcd-ab-ab] [gw4] [ 88%] PASSED tests/test_sequence/test_lcsstr.py::test_distance[TEST-MYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTESTMYTEST-TEST] tests/test_token/test_bag.py::test_distance[qwe-erty-3] tests/test_token/test_bag.py::test_distance[qwe-qwe-0] [gw6] [ 88%] PASSED tests/test_token/test_bag.py::test_distance[qwe-erty-3] [gw7] [ 88%] PASSED tests/test_token/test_bag.py::test_distance[qwe-qwe-0] tests/test_token/test_bag.py::test_distance[qwe-rtys-4] tests/test_token/test_bag.py::test_distance[qwe-ewq-0] [gw6] [ 88%] PASSED tests/test_token/test_bag.py::test_distance[qwe-rtys-4] [gw4] [ 89%] PASSED tests/test_token/test_bag.py::test_distance[qwe-ewq-0] tests/test_token/test_cosine.py::test_distance[test-text-0.75] tests/test_token/test_jaccard.py::test_distance[test-text-0.6] tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595] [gw6] [ 89%] PASSED tests/test_token/test_jaccard.py::test_distance[test-text-0.6] [gw7] [ 89%] PASSED tests/test_token/test_cosine.py::test_distance[test-text-0.75] [gw4] [ 89%] PASSED tests/test_token/test_cosine.py::test_distance[nelson-neilsen-0.7715167498104595] tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333] tests/test_token/test_jaccard.py::test_compare_with_tversky [gw6] [ 90%] PASSED tests/test_token/test_jaccard.py::test_distance[decide-resize-0.3333333333333333] tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625] [gw7] [ 90%] PASSED tests/test_token/test_jaccard.py::test_distance[nelson-neilsen-0.625] tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805] [gw7] [ 90%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left0-right0-0.805] tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666] [gw7] [ 90%] PASSED tests/test_token/test_overlap.py::test_distance[testme-textthis-0.6666666666666666] tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334] [gw7] [ 91%] PASSED tests/test_token/test_overlap.py::test_distance[nelson-neilsen-0.8333333333333334] [gw4] [ 91%] FAILED 
tests/test_token/test_jaccard.py::test_compare_with_tversky [gw5] [ 91%] FAILED tests/test_external.py::test_list_of_numbers[JaroWinkler] tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] tests/test_token/test_sorensen.py::test_distance[test-text-0.75] [gw5] [ 91%] PASSED tests/test_compression/test_arith_ncd.py::test_similarity[test-test-1] tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] [gw5] [ 91%] PASSED tests/test_compression/test_arith_ncd.py::test_similarity[test-nani-2.1666666666666665] [gw7] [ 92%] PASSED tests/test_token/test_sorensen.py::test_distance[test-text-0.75] tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667] [gw0] [ 92%] FAILED tests/test_compression/test_common.py::test_simmetry[alg4] tests/test_compression/test_arith_ncd.py::test_make_probs [gw5] [ 92%] PASSED tests/test_compression/test_arith_ncd.py::test_make_probs [gw4] [ 92%] PASSED tests/test_token/test_monge_elkan.py::test_similarity[left1-right1-0.7866666666666667] tests/test_compression/test_arith_ncd.py::test_arith_output [gw5] [ 93%] PASSED tests/test_compression/test_arith_ncd.py::test_arith_output tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] [gw5] [ 93%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-test-0.6] tests/test_token/test_sorensen.py::test_compare_with_tversky tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] tests/test_compression/test_common.py::test_simmetry[alg5] [gw5] [ 93%] PASSED tests/test_compression/test_bwtrle_ncd.py::test_similarity[test-nani-0.8] tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] [gw5] [ 93%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-test-0.08] tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set [gw5] [ 94%] PASSED tests/test_compression/test_bz2_ncd.py::test_similarity[test-nani-0.16] [gw2] [ 94%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg2] tests/test_compression/test_common.py::test_is_normalized[alg3] [gw1] [ 94%] PASSED tests/test_compression/test_entropy_ncd.py::test_simmetry_compressor tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor [gw3] [ 94%] PASSED tests/test_compression/test_sqrt_ncd.py::test_distributivity_compressor tests/test_compression/test_sqrt_ncd.py::test_normalization_range [gw0] [ 95%] FAILED tests/test_compression/test_common.py::test_simmetry[alg5] [gw4] [ 95%] FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set [gw2] [ 95%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg3] tests/test_compression/test_common.py::test_is_normalized[alg4] [gw1] [ 95%] FAILED tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor [gw2] [ 96%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg4] tests/test_compression/test_common.py::test_is_normalized[alg5] [gw3] [ 96%] FAILED tests/test_compression/test_sqrt_ncd.py::test_normalization_range tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1] [gw2] [ 96%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg5] tests/test_compression/test_common.py::test_is_normalized[alg6] [gw7] [ 96%] FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky [gw3] [ 97%] 
PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-text-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1] [gw3] [ 97%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-tset-1] tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4] [gw3] [ 97%] PASSED tests/test_edit/test_damerau_levenshtein.py::test_distance_restricted[test-qwy-4] [gw6] [ 97%] FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set tests/test_token/test_overlap.py::test_distance[test-text-0.75] [gw6] [ 98%] PASSED tests/test_token/test_overlap.py::test_distance[test-text-0.75] [gw2] [ 98%] FAILED tests/test_compression/test_common.py::test_is_normalized[alg6] tests/test_compression/test_common.py::test_normalized_by_one[alg0] [gw1] [ 98%] PASSED tests/test_compression/test_entropy_ncd.py::test_monotonicity_compressor tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor [gw2] [ 98%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg0] tests/test_compression/test_common.py::test_normalized_by_one[alg1] [gw1] [ 99%] PASSED tests/test_compression/test_entropy_ncd.py::test_distributivity_compressor tests/test_compression/test_entropy_ncd.py::test_normalization_range [gw2] [ 99%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg1] tests/test_compression/test_common.py::test_normalized_by_one[alg2] [gw1] [ 99%] FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range [gw2] [ 99%] FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg2] tests/test_compression/test_common.py::test_normalized_by_one[alg3] [gw2] [100%] PASSED tests/test_compression/test_common.py::test_normalized_by_one[alg3]
=================================== FAILURES ===================================
________________________ test_normalization_same[alg12] ________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

text = '𤐗æð\x14»ÿe\x93V ÷W\x99ºÚRL\r7Æ'
alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('𤐗æð\x14»ÿe\x93V ÷W\x99ºÚRL\r7Æ', Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 1, start = 11038136.605449017, result = None
finish = 11038137.00052912, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=395080)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 395.08ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 93 bytes, frozen), print_example = True, is_final = True
expected_failure = (DeadlineExceeded('Test took 395.08ms, which exceeds the deadline of 200.00ms'), "args = ('𤐗æð\\x14»ÿe\\x93V ÷W\\x99ºÚ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='𤐗æð\x14»ÿe\x93V ÷W\x99ºÚRL\r7Æ', alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_same(
E           alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}),
E           text='𤐗æð\x14»ÿe\x93V ÷W\x99ºÚRL\r7Æ',
E       )
E       Unreliable test timings! On an initial run, this test took 395.08ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.31 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_range[alg0] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = '', alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
E   hypothesis.errors.DeadlineExceeded: Test took 346.86ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E       hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='0', right='', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=Bag({'qval': 1, 'external': True}),
E           left='0',
E           right='',
E       )
E       Unreliable test timings! On an initial run, this test took 346.86ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.39 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_range[alg1] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '<7|\r', right = '\U000b5d1c\U000352fa`'
alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})

tests/test_common.py:50:
E   hypothesis.errors.DeadlineExceeded: Test took 384.49ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E       hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='<7|\r', right='\U000b5d1c\U000352fa`', alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}),
E           left='<7|\r',
E           right='\U000b5d1c\U000352fa`',
E       )
E       Unreliable test timings! On an initial run, this test took 384.49ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.99 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_same[alg0] _________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

text = '`\x96Ç\U000f940dì', alg = Bag({'qval': 1, 'external': True})

tests/test_common.py:71:
E   hypothesis.errors.DeadlineExceeded: Test took 546.90ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E       hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='`\x96Ç\U000f940dì', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_same(
E           alg=Bag({'qval': 1, 'external': True}),
E           text='`\x96Ç\U000f940dì',
E       )
E       Unreliable test timings! On an initial run, this test took 546.90ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 77.18 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________________ test_normalization_same[alg1] _________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.19 seconds (0 invalid ones and 2 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_common.py:71: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(93124846601142084896690933115434316594) to this test or run pytest with --hypothesis-seed=93124846601142084896690933115434316594 to reproduce this failure.
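The FailedHealthCheck above is hypothesis refusing to continue when example generation is slow, and its message names the escape hatch; the Hypothesis footer gives a seed for reproduction. A hypothetical sketch combining both, using only documented hypothesis APIs (the test body is a placeholder, the real assertions live in tests/test_common.py):

    import hypothesis
    from hypothesis import HealthCheck, strategies as st

    @hypothesis.seed(93124846601142084896690933115434316594)  # seed printed above
    @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
    @hypothesis.given(text=st.text())
    def test_normalization_same(text):
        ...  # placeholder body

The same reproduction is available without editing the test by running pytest with --hypothesis-seed=93124846601142084896690933115434316594, as the footer says.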
On an initial run, this test took 546.90ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 77.18 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg1] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.19 seconds (0 invalid ones and 2 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test_common.py:71: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(93124846601142084896690933115434316594) to this test or run pytest with --hypothesis-seed=93124846601142084896690933115434316594 to reproduce this failure. _______________________ test_normalization_range[alg12] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 left = 'Ä@È\x1d\x87\x00\x95\U00092368', right = '\x91\x94®\\\U00086454àõïÏì\x8e' alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('Ä@È\x1d\x87\x00\x95\U00092368', '\x91\x94®\\\U00086454àõïÏì\x8e', Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038170.063901773, result = None finish = 11038170.403997973, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=340096) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 340.10ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 111 bytes, frozen), 
_______________________ test_normalization_range[alg12] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = 'Ä@È\x1d\x87\x00\x95\U00092368', right = '\x91\x94®\\\U00086454àõïÏì\x8e'
alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('Ä@È\x1d\x87\x00\x95\U00092368', '\x91\x94®\\\U00086454àõïÏì\x8e', Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038170.063901773, result = None
finish = 11038170.403997973, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=340096)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-wrapper body identical to the first traceback above ...]
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 340.10ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    [... execute_once frame identical to the first Flaky traceback above; locals: data = ConjectureData(VALID, 111 bytes, frozen), print_example = True, is_final = True, example_kwargs = None ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='Ä@È\x1d\x87\x00\x95\U00092368', right='\x91\x94®\\\U00086454àõïÏì\x8e', alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}),
E           left='Ä@È\x1d\x87\x00\x95\U00092368',
E           right='\x91\x94®\\\U00086454àõïÏì\x8e',
E       )
E       Unreliable test timings! On an initial run, this test took 340.10ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.33 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
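Each of these Flaky errors carries the same advice: turn the deadline off for tests whose first call is legitimately slower than later calls. A sketch of the per-test form; the decorator stack mirrors tests/test_common.py:50, but the ALGS stand-in and the assertion body are assumptions based on the test name, not the file's actual source:

    import hypothesis
    import hypothesis.strategies as st
    import pytest
    import textdistance

    ALGS = [textdistance.Bag(qval=1)]  # stand-in; the real list lives in tests/test_common.py

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(deadline=None)  # disable the 200ms wall-clock deadline
    @hypothesis.given(left=st.text(), right=st.text())
    def test_normalization_range(left, right, alg):
        # assumed assertion, inferred from the test name
        assert 0 <= alg.normalized_distance(left, right) <= 1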
_______________________ test_normalization_by_one[alg12] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = '\x89\U000327b0\U0005c7b765\x04\x81\U00046d48}\U00088be9'
right = '\x8fIß'
alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\x89\U000327b0\U0005c7b765\x04\x81\U00046d48}\U00088be9', '\x8fIß', Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038169.150162168, result = None
finish = 11038169.602222169, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=452060)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-wrapper body identical to the first traceback above ...]
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 452.06ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    [... execute_once frame identical to the first Flaky traceback above; locals: data = ConjectureData(VALID, 99 bytes, frozen), print_example = True, is_final = True, example_kwargs = None ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\x89\U000327b0\U0005c7b765\x04\x81\U00046d48}\U00088be9', right='\x8fIß', alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_by_one(
E           alg=Tversky({'qval': 1, 'ks': repeat(1), 'bias': None, 'as_set': False, 'external': True}),
E           left='\x89\U000327b0\U0005c7b765\x04\x81\U00046d48}\U00088be9',
E           right='\x8fIß',
E       )
E       Unreliable test timings! On an initial run, this test took 452.06ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.23 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
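Why would the first call be slow and every replay fast? One plausible mechanism (an inference from the 'external': True in every algorithm repr here, not something this log proves) is one-time setup on the first call, such as textdistance probing for optional accelerated implementations. The pattern is easy to reproduce in isolation:

    import time

    def distance_with_lazy_setup(left, right, _cache={}):
        if 'ready' not in _cache:   # one-time work on the first call only,
            time.sleep(0.3)         # standing in for e.g. optional imports
            _cache['ready'] = True
        return 0                    # trivial stand-in result

    for attempt in range(2):
        start = time.perf_counter()
        distance_with_lazy_setup('abc', 'abd')
        elapsed = time.perf_counter() - start
        print(f"call {attempt}: {elapsed * 1000:.2f}ms")
    # call 0: ~300ms -> DeadlineExceeded under a 200ms deadline
    # call 1: ~0ms   -> the replay passes, so Hypothesis reports Flaky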
_______________________ test_normalization_by_one[alg0] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '𰦴𰦴', right = 'z𰦴', alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('𰦴𰦴', 'z𰦴', Bag({'qval': 1, 'external': True})), kwargs = {}
initial_draws = 2, start = 11038195.069040086, result = None
finish = 11038195.408789488, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=339749)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-wrapper body identical to the first traceback above ...]
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 339.75ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Bag({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    [... execute_once frame identical to the first Flaky traceback above; locals: data = ConjectureData(VALID, 20 bytes, frozen), print_example = True, is_final = True, example_kwargs = None ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='𰦴𰦴', right='z𰦴', alg=Bag({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_by_one(
E           alg=Bag({'qval': 1, 'external': True}),
E           left='𰦴𰦴',
E           right='z𰦴',
E       )
E       Unreliable test timings! On an initial run, this test took 339.75ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 35.38 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_by_one[alg1] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

alg = Hamming({'qval': 1, 'test_func': , 'truncate': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.29 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_common.py:60: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(316147234047087791302449136173579517105) to this test or run pytest with --hypothesis-seed=316147234047087791302449136173579517105 to reproduce this failure.
________________________ test_normalization_range[alg2] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '±⭾\t\x0b?-\x84,', right = 'öé°ßªy\\'
alg = Levenshtein({'qval': 1, 'test_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('±⭾\t\x0b?-\x84,', 'öé°ßªy\\', Levenshtein({'qval': 1, 'test_func': , 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038194.560763285, result = None
finish = 11038202.67614542, internal_draw_time = 0
runtime = datetime.timedelta(seconds=8, microseconds=115382)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        [... deadline-wrapper body identical to the first traceback above ...]
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 8115.38ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = Levenshtein({'qval': 1, 'test_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    [... execute_once frame identical to the first Flaky traceback above; locals: data = ConjectureData(VALID, 74 bytes, frozen), print_example = True, is_final = True, example_kwargs = None ...]
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='±⭾\t\x0b?-\x84,', right='öé°ßªy\\', alg=Levenshtein({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalization_range(
E           alg=Levenshtein({'qval': 1, 'test_func': , 'external': True}),
E           left='±⭾\t\x0b?-\x84,',
E           right='öé°ßªy\\',
E       )
E       Unreliable test timings! On an initial run, this test took 8115.38ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.68 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
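The 8115.38ms first run against a 0.68ms replay is the most extreme gap in this log, on inputs of only 8 and 7 characters, which again points at one-time setup rather than algorithmic cost. The steady-state cost of Levenshtein itself is easy to bound: the textbook dynamic program fills a (len(left)+1) x (len(right)+1) table, so runtime grows with the product of the lengths. A sketch using the textdistance API; external=False is an assumption about how to force the pure-Python path and skip the optional-library probing:

    import textdistance

    alg = textdistance.Levenshtein(external=False)  # skip probing for external libraries
    # The DP table is 9 x 8 cells for these inputs: microseconds of work, nowhere near 8s.
    print(alg.normalized_distance('±⭾\t\x0b?-\x84,', 'öé°ßªy\\'))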
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='Z', right='[â', alg=Levenshtein({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Levenshtein({'qval': 1, 'test_func': , 'external': True}), E left='Z', E right='[â', E ) E Unreliable test timings! 
On an initial run, this test took 5977.66ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.62 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg13] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = 'UUø', right = '¦' alg = Overlap({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('UUø', '¦', Overlap({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038222.15966161, result = None finish = 11038223.465710817, internal_draw_time = 0 runtime = datetime.timedelta(seconds=1, microseconds=306049) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 1306.05ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Overlap({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 21 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 1306.05ms, which exceeds the deadline of 200.00ms'), "args = ('UUø', '¦', Overlap({'qval'...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='UUø', right='¦', alg=Overlap({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Overlap({'qval': 1, 'as_set': False, 'external': True}), E left='UUø', E right='¦', E ) E Unreliable test timings! 
On an initial run, this test took 1306.05ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.33 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg14] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 text = '/\x87\U0004e5c9"' alg = Cosine({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('/\x87\U0004e5c9"', Cosine({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 1, start = 11038226.059880828, result = None finish = 11038226.401643328, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=341762) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 341.76ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Cosine({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 19 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 341.76ms, which exceeds the deadline of 200.00ms'), 'args = (\'/\\x87\\U0004e5c9"\', Cosi...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
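What these tests assert is worth keeping in view amid the timing noise: test_normalization_range, per its name, appears to check that each algorithm's normalized distance stays in [0, 1]. For the Overlap coefficient in the block above that bound follows directly from its definition, overlap(A, B) = |A ∩ B| / min(|A|, |B|). A quick standalone check with the textdistance API (external=False assumed, to avoid the optional-library probing):

    import textdistance

    alg = textdistance.Overlap(qval=1, external=False)
    # |A & B| can never exceed min(|A|, |B|), so similarity <= 1 and the
    # normalized distance stays within [0, 1].
    assert 0 <= alg.normalized_distance('UUø', '¦') <= 1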
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='/\x87\U0004e5c9"', alg=Cosine({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Cosine({'qval': 1, 'as_set': False, 'external': True}), E text='/\x87\U0004e5c9"', E ) E Unreliable test timings! 
On an initial run, this test took 341.76ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.30 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg13] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 left = '\U0007b16e\U0007e1e5\U00072c49', right = '\x85Æ\x05\U000f507c' alg = Overlap({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0007b16e\U0007e1e5\U00072c49', '\x85Æ\x05\U000f507c', Overlap({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038246.09755762, result = None finish = 11038246.43737872, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=339821) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 339.82ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Overlap({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 61 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 339.82ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0007b16e\\U0007e1e5\\U00...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U0007b16e\U0007e1e5\U00072c49', right='\x85Æ\x05\U000f507c', alg=Overlap({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Overlap({'qval': 1, 'as_set': False, 'external': True}), E left='\U0007b16e\U0007e1e5\U00072c49', E right='\x85Æ\x05\U000f507c', E ) E Unreliable test timings! 
On an initial run, this test took 339.82ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.39 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg3] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 left = '', right = '\U000f66cb¢¤' alg = DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '\U000f66cb¢¤', DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})) kwargs = {}, initial_draws = 2, start = 11038251.508565444, result = None finish = 11038252.009387245, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=500822) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 500.82ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 30 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 500.82ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\U000f66cb¢¤', Damerau...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='\U000f66cb¢¤', alg=DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), E left='', E right='\U000f66cb¢¤', E ) E Unreliable test timings! 
On an initial run, this test took 500.82ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.35 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg15] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 left = 'P', right = '\x0eP' alg = StrCmp95({'long_strings': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('P', '\x0eP', StrCmp95({'long_strings': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038296.079657547, result = None finish = 11038296.42336165, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=343704) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 343.70ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = StrCmp95({'long_strings': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 13 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 343.70ms, which exceeds the deadline of 200.00ms'), "args = ('P', '\\x0eP', StrCmp95({'lo...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
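Every traceback in this run goes through the same Hypothesis proxy quoted above: it times the test body with time.perf_counter(), subtracts the time spent drawing inputs, and compares the result against a deadline that non-final runs widen by 25% via (current_deadline // 4) * 5. That widening is why the locals show current_deadline = datetime.timedelta(microseconds=250000) next to a configured deadline of 200.00ms. A minimal sketch of that arithmetic, reusing the numbers from the failure above:

import datetime

deadline = datetime.timedelta(milliseconds=200)     # settings.deadline in this run
current_deadline = (deadline // 4) * 5              # widened deadline for non-final runs
print(current_deadline)                             # 0:00:00.250000, the value in the locals

runtime = datetime.timedelta(microseconds=500822)   # first run of test_normalization_by_one[alg3]
print(runtime >= current_deadline)                  # True -> DeadlineExceeded is raised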
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='P', right='\x0eP', alg=StrCmp95({'long_strings': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=StrCmp95({'long_strings': False, 'external': True}), E left='P', E right='\x0eP', E ) E Unreliable test timings! On an initial run, this test took 343.70ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.95 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg16] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 text = '8##' alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('8##', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) kwargs = {}, initial_draws = 1, start = 11038299.069509363, result = None finish = 11038299.365595864, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=296087) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 296.09ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 13 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 296.09ms, which exceeds the deadline of 200.00ms'), "args = ('8##', MongeElkan({'algorith...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
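The remedy that every report suggests, deadline=None, is a one-line change per test. A minimal sketch, assuming a property test shaped like the ones quoted from tests/test_common.py (the test name and body here are placeholders, not the package's actual code):

import hypothesis
from hypothesis import strategies

@hypothesis.settings(deadline=None)   # opt this single test out of the wall-clock deadline
@hypothesis.given(
    left=strategies.text(),
    right=strategies.text(),
)
def test_normalization_range(left, right):
    ...                               # placeholder body; only the timing check is disabled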
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='8##', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}), E text='8##', E ) E Unreliable test timings! On an initial run, this test took 296.09ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.89 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg5] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 text = '\U000508f2>' alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000508f2>', JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) kwargs = {}, initial_draws = 1, start = 11038304.885859288, result = None finish = 11038305.405176193, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=519317) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 519.32ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 10 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 519.32ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000508f2>', JaroWinkler(...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. 
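For a package build like this one, where patching every test is impractical, Hypothesis settings profiles can turn deadlines off globally from a conftest.py. A sketch under the assumption that such a file can be shipped next to the test suite; the profile name and the environment check are illustrative, not taken from this build:

# conftest.py (illustrative)
import os

from hypothesis import settings

settings.register_profile("rpmbuild", deadline=None)   # no wall-clock deadline on busy builders

if os.environ.get("MOCK_BUILD"):                       # hypothetical trigger; any marker works
    settings.load_profile("rpmbuild")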
_______________________ test_normalization_by_one[alg15] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '\U000437df·mX\x07ì1\U0007d43e»D\x08'
alg = StrCmp95({'long_strings': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\U000437df·mX\x07ì1\U0007d43e»D\x08', StrCmp95({'long_strings': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038304.300924085, result = None
finish = 11038304.809846189, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=508922)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 508.92ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='', right='\U000437df·mX\x07ì1\U0007d43e»D\x08', alg=StrCmp95({'long_strings': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_by_one(
E       alg=StrCmp95({'long_strings': False, 'external': True}),
E       left='',
E       right='\U000437df·mX\x07ì1\U0007d43e»D\x08',
E   )
E   Unreliable test timings! On an initial run, this test took 508.92ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.51 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_range[alg5] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '6[\x06TJ\x88;𱫜%\x92\x04K\xad\x03Tÿ', right = '\U0008117f'
alg = JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('6[\x06TJ\x88;𱫜%\x92\x04K\xad\x03Tÿ', '\U0008117f', JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038313.630478527, result = None
finish = 11038314.00489143, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=374413)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 374.41ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='6[\x06TJ\x88;𱫜%\x92\x04K\xad\x03Tÿ', right='\U0008117f', alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=JaroWinkler({'qval': 1, 'long_tolerance': False, 'winklerize': True, 'external': True}),
E       left='6[\x06TJ\x88;𱫜%\x92\x04K\xad\x03Tÿ',
E       right='\U0008117f',
E   )
E   Unreliable test timings! On an initial run, this test took 374.41ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.91 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
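The falsifying examples printed in these reports are ordinary inputs and can be pinned with hypothesis.example, so a known-bad case is replayed on every run instead of depending on the shrinker rediscovering it. A minimal sketch reusing the input from the alg15 failure above (the test name and body are again placeholders):

import hypothesis
from hypothesis import strategies

@hypothesis.given(left=strategies.text(), right=strategies.text())
@hypothesis.example(left='', right='\U000437df·mX\x07ì1\U0007d43e»D\x08')   # pinned from the log
def test_normalization_by_one(left, right):
    ...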
_______________________ test_normalization_range[alg16] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '\x98\U000d195aÙ\U000b487aø\U000c4bba', right = '鈵\x18\x8eâz'
alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': <function _ident at 0x...>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\x98\U000d195aÙ\U000b487aø\U000c4bba', '鈵\x18\x8eâz', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_..._ident at 0xffffff9b925800>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038336.744457833, result = None
finish = 11038337.411111336, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=666654)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 666.65ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\x98\U000d195aÙ\U000b487aø\U000c4bba', right='鈵\x18\x8eâz', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': <function _ident at 0x...>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': <function _ident at 0x...>, 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}),
E       left='\x98\U000d195aÙ\U000b487aø\U000c4bba',
E       right='鈵\x18\x8eâz',
E   )
E   Unreliable test timings! On an initial run, this test took 666.65ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 10.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________________ test_normalization_same[alg17] ________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

text = '퐁'
alg = MRA({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('퐁', MRA({'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 1, start = 11038329.909303203, result = None
finish = 11038330.413280904, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=503978)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 503.98ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='퐁', alg=MRA({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_same(
E       alg=MRA({'qval': 1, 'external': True}),
E       text='퐁',
E   )
E   Unreliable test timings! On an initial run, this test took 503.98ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.39 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
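The millisecond figures in these reports (503.98ms on the first call, 2.39 ms on the replay) come from the measurement in the proxy quoted once above: perf_counter wall-clock time, minus time spent drawing data. A standalone sketch of the same pattern, where slow_once is a stand-in for a test whose first call pays a one-off cost:

import datetime
import time

def timed(fn):
    # Same shape as the Hypothesis proxy, without the draw-time bookkeeping.
    start = time.perf_counter()
    fn()
    finish = time.perf_counter()
    return datetime.timedelta(seconds=finish - start)

_warm = False

def slow_once():
    global _warm
    if not _warm:            # stand-in for first-call costs such as imports or cache warm-up
        time.sleep(0.5)
        _warm = True

print(timed(slow_once))      # roughly 0:00:00.500000 on the first call
print(timed(slow_once))      # microseconds on the replay: the Flaky pattern seen above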
_______________________ test_normalization_same[alg18] ________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

text = '0'
alg = Prefix({'qval': 1, 'sim_test': <function ...>})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('0', Prefix({'qval': 1, 'sim_test': <function ...>}))
kwargs = {}, initial_draws = 1, start = 11038354.725784516, result = None
finish = 11038355.019274218, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=293490)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 293.49ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

E   hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='0', alg=Prefix({'qval': 1, 'sim_test': <function ...>})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_same(
E       alg=Prefix({'qval': 1, 'sim_test': <function ...>}),
E       text='0',
E   )
E   Unreliable test timings! On an initial run, this test took 293.49ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.56 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='0', alg=Prefix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Prefix({'qval': 1, 'sim_test': }), E text='0', E ) E Unreliable test timings! On an initial run, this test took 293.49ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.56 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg6] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 left = '#', right = '=e\x1bDøë' alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('#', '=e\x1bDøë', MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038352.038640805, result = None finish = 11038352.425759805, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=387119) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 387.12ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 37 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 387.12ms, which exceeds the deadline of 200.00ms'), "args = ('#', '=e\\x1bDøë', MLIPNS({'...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
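All of the failing algorithm instances were constructed with 'external': True, which asks textdistance to prefer optional compiled backends. A plausible explanation for the consistent slow-first-call, fast-replay pattern is one-off work on first use (locating or importing those backends), though the log itself does not prove the cause. A sketch for checking that locally; illustrative, with inputs borrowed from one of the failures above:

import time

import textdistance

alg = textdistance.JaroWinkler(external=True)    # same flag as the failing instances

for attempt in (1, 2):
    start = time.perf_counter()
    alg.normalized_distance('P', '\x0eP')
    elapsed = (time.perf_counter() - start) * 1000
    print(f"call {attempt}: {elapsed:.2f} ms")   # a one-off cost on call 1 would match the reports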
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='#', right='=e\x1bDøë', alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}), E left='#', E right='=e\x1bDøë', E ) E Unreliable test timings! 
On an initial run, this test took 387.12ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.32 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg17] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 left = '\U000ab37eK', right = '\x17', alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U000ab37eK', '\x17', MRA({'qval': 1, 'external': True})), kwargs = {} initial_draws = 2, start = 11038353.28476391, result = None finish = 11038354.009220012, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=724456) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 724.46ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 18 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 724.46ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000ab37eK', '\\x17', MRA...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\U000ab37eK', right='\x17', alg=MRA({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=MRA({'qval': 1, 'external': True}), E left='\U000ab37eK', E right='\x17', E ) E Unreliable test timings! On an initial run, this test took 724.46ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.78 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg16] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = '𐧯\U000aba15', right = '±\x06' alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('𐧯\U000aba15', '±\x06', MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038335.625682129, result = None finish = 11038345.480509873, internal_draw_time = 0 runtime = datetime.timedelta(seconds=9, microseconds=854828) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 9854.83ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 27 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 9854.83ms, which exceeds the deadline of 200.00ms'), "args = ('𐧯\\U000aba15', '±\\x06', M...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='𐧯\U000aba15', right='±\x06', alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=MongeElkan({'algorithm': DamerauLevenshtein({'qval': 1, 'test_func': , 'external': True, 'restricted': True}), 'symmetric': False, 'qval': 1, 'external': True}), E left='𐧯\U000aba15', E right='±\x06', E ) E Unreliable test timings! On an initial run, this test took 9854.83ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.27 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg6] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = 'G\U000e5a92', right = '\x8by' alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('G\U000e5a92', '\x8by', MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038352.848721107, result = None finish = 11038353.21749521, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=368774) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 368.77ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 23 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 368.77ms, which exceeds the deadline of 200.00ms'), "args = ('G\\U000e5a92', '\\x8by', ML...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, 
expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='G\U000e5a92', right='\x8by', alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=MLIPNS({'qval': 1, 'threshold': 0.25, 'maxmismatches': 2, 'external': True}), E left='G\U000e5a92', E right='\x8by', E ) E Unreliable test timings! On an initial run, this test took 368.77ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.27 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg17] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 alg = MRA({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('²C', '\x85\U0004119e\x15', MRA({'qval': 1, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038383.87890265, result = None finish = 11038384.409576952, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=530674) current_deadline = timedelta(milliseconds=200) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 530.67ms, which exceeds the deadline of 200.00ms E Falsifying example: test_normalization_by_one( E alg=MRA({'qval': 1, 'external': True}), E left='²C', E right='\x85\U0004119e\x15', E ) /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded ________________________ test_normalization_same[alg7] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 text = '\U00094f75\xad' alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U00094f75\xad', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 1, start = 11038362.479310952, result = None finish = 11038363.009819053, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=530508) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = 
datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 530.51ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 10 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 530.51ms, which exceeds the deadline of 200.00ms'), "args = ('\\U00094f75\\xad', LCSSeq({...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U00094f75\xad', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E text='\U00094f75\xad', E ) E Unreliable test timings! On an initial run, this test took 530.51ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.42 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
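Note on the deadline=None advice repeated above: every Flaky failure in this run bottoms out in the same DeadlineExceeded. Hypothesis enforces a 200 ms per-example deadline, padded on non-final runs to (200 // 4) * 5 = 250 ms (hence current_deadline = datetime.timedelta(microseconds=250000) in the captured locals), and the first cold run of each algorithm overshoots it while the warm replay finishes in a millisecond or two. A minimal sketch of the suggested fix on one such property follows; it assumes the module-level textdistance.mlipns instance rather than the ALGS fixture that tests/test_common.py actually parametrizes over, so treat it as illustration, not the suite's code.

import hypothesis
from hypothesis import strategies

import textdistance

# Sketch only: tests/test_common.py parametrizes over ALGS; mlipns is
# assumed here as one concrete algorithm instance.
@hypothesis.settings(deadline=None)  # lift the 200 ms per-example deadline
@hypothesis.given(left=strategies.text(), right=strategies.text())
def test_normalization_range(left, right):
    # Property under test: normalized similarity stays within [0, 1].
    assert 0 <= textdistance.mlipns.normalized_similarity(left, right) <= 1

Setting deadline=None trades per-example timing enforcement for stability, which is usually the right call inside a loaded build chroot.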
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg7] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 left = '¹âëD\x02ª~\U00074ae9µµ*_«' right = '\x86H\U0006e788\U00090acbtýÄÒa<\x13\U000d3be2' alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('¹âëD\x02ª~\U00074ae9µµ*_«', '\x86H\U0006e788\U00090acbtýÄÒa<\x13\U000d3be2', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 11038372.409176698, result = None finish = 11038372.8135527, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=404376) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 404.38ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 135 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 404.38ms, which exceeds the deadline of 200.00ms'), "args = ('¹âëD\\x02ª~\\U00074ae9µµ*_«...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='¹âëD\x02ª~\U00074ae9µµ*_«', right='\x86H\U0006e788\U00090acbtýÄÒa<\x13\U000d3be2', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E left='¹âëD\x02ª~\U00074ae9µµ*_«', E right='\x86H\U0006e788\U00090acbtýÄÒa<\x13\U000d3be2', E ) E Unreliable test timings! 
On an initial run, this test took 404.38ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 7.96 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg8] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 text = '\U00052d8e', alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U00052d8e', LCSStr({'qval': 1, 'external': True})), kwargs = {} initial_draws = 1, start = 11038390.30004338, result = None finish = 11038390.57374278, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=273699) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 273.70ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 7 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 273.70ms, which exceeds the deadline of 200.00ms'), "args = ('\\U00052d8e', LCSStr({'qval...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='\U00052d8e', alg=LCSStr({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=LCSStr({'qval': 1, 'external': True}), E text='\U00052d8e', E ) E Unreliable test timings! On an initial run, this test took 273.70ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.90 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_range[alg7] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = '\x1d$\x8a\x0c\x1b', right = '' alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x1d$\x8a\x0c\x1b', '', LCSSeq({'qval': 1, 'test_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 11038383.309562348, result = None finish = 11038383.80857795, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=499016) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 499.02ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSSeq({'qval': 1, 'test_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 26 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 499.02ms, which exceeds the deadline of 200.00ms'), "args = ('\\x1d$\\x8a\\x0c\\x1b', '',...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='\x1d$\x8a\x0c\x1b', right='', alg=LCSSeq({'qval': 1, 'test_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=LCSSeq({'qval': 1, 'test_func': , 'external': True}), E left='\x1d$\x8a\x0c\x1b', E right='', E ) E Unreliable test timings! 
On an initial run, this test took 499.02ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.54 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg19] ________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 text = 'àànqsº¬\x8eà ÌÍáÅ' alg = Postfix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('àànqsº¬\x8eà ÌÍáÅ', Postfix({'qval': 1, 'sim_test': })) kwargs = {}, initial_draws = 1, start = 11038381.68193134, result = None finish = 11038382.232034441, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=550103) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 550.10ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Postfix({'qval': 1, 'sim_test': }) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 69 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 550.10ms, which exceeds the deadline of 200.00ms'), "args = ('àànqsº¬\\x8eà ÌÍáÅ', Postfi...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='àànqsº¬\x8eà ÌÍáÅ', alg=Postfix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=Postfix({'qval': 1, 'sim_test': }), E text='àànqsº¬\x8eà ÌÍáÅ', E ) E Unreliable test timings! On an initial run, this test took 550.10ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.29 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg9] _________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 text = '', alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', RatcliffObershelp({'qval': 1, 'external': True})), kwargs = {} initial_draws = 1, start = 11038401.509309232, result = None finish = 11038401.808892932, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=299584) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 299.58ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = RatcliffObershelp({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) tests/test_common.py:71: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 1 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 299.58ms, which exceeds the deadline of 200.00ms'), "args = ('', RatcliffObershelp({'qval...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='', alg=RatcliffObershelp({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_same( E alg=RatcliffObershelp({'qval': 1, 'external': True}), E text='', E ) E Unreliable test timings! On an initial run, this test took 299.58ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.26 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ________________________ test_normalization_same[alg10] ________________________ [gw4] linux -- Python 3.12.0 /usr/bin/python3 alg = Jaccard({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given(text=hypothesis.strategies.text()) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 2.05 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test_common.py:71: FailedHealthCheck ---------------------------------- Hypothesis ---------------------------------- You can add @seed(186749257065859625039702415236667125061) to this test or run pytest with --hypothesis-seed=186749257065859625039702415236667125061 to reproduce this failure. _______________________ test_normalization_by_one[alg8] ________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 left = 'ES¡ËSF·', right = '¾', alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('ES¡ËSF·', '¾', LCSStr({'qval': 1, 'external': True})), kwargs = {} initial_draws = 2, start = 11038414.688398892, result = None finish = 11038415.008832293, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=320433) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 320.43ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = LCSStr({'qval': 1, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 40 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 320.43ms, which exceeds the deadline of 200.00ms'), "args = ('ES¡ËSF·', '¾', LCSStr({'qva...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, 
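Note: the FailedHealthCheck above is a different failure mode: example generation, not the test body, was too slow, so there is no falsifying example at all. The message names the escape hatch (HealthCheck.too_slow), and the seed line gives a way to reproduce. A sketch combining both, under the same assumptions as the previous snippet:

    import hypothesis
    import hypothesis.strategies
    import pytest
    from hypothesis import HealthCheck

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.seed(186749257065859625039702415236667125061)  # seed printed above
    @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
    @hypothesis.given(text=hypothesis.strategies.text())
    def test_normalization_same(text, alg):
        ...

Alternatively, pytest --hypothesis-seed=186749257065859625039702415236667125061 reproduces this failure without editing the test, as the log itself suggests.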
_______________________ test_normalization_by_one[alg8] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = 'ES¡ËSF·', right = '¾', alg = LCSStr({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('ES¡ËSF·', '¾', LCSStr({'qval': 1, 'external': True})), kwargs = {}
initial_draws = 2, start = 11038414.688398892, result = None
finish = 11038415.008832293, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=320433)
current_deadline = datetime.timedelta(microseconds=250000)

    [... @proxies(self.test) wrapper source identical to the first traceback above ...]
E   hypothesis.errors.DeadlineExceeded: Test took 320.43ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 40 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 320.43ms, which exceeds the deadline of 200.00ms'), "args = ('ES¡ËSF·', '¾', LCSStr({'qva...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [... execute_once source identical to the first traceback above ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='ES¡ËSF·', right='¾', alg=LCSStr({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_by_one(
E       alg=LCSStr({'qval': 1, 'external': True}),
E       left='ES¡ËSF·',
E       right='¾',
E   )
E   Unreliable test timings! On an initial run, this test took 320.43ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.85 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky

________________________ test_normalization_range[alg8] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '܁𰤬', right = '\U00010aa8𰤬', alg = LCSStr({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('܁𰤬', '\U00010aa8𰤬', LCSStr({'qval': 1, 'external': True})), kwargs = {}
initial_draws = 2, start = 11038428.656986156, result = None
finish = 11038429.211703455, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=554717)
current_deadline = datetime.timedelta(microseconds=250000)

    [... @proxies(self.test) wrapper source identical to the first traceback above ...]
E   hypothesis.errors.DeadlineExceeded: Test took 554.72ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 26 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 554.72ms, which exceeds the deadline of 200.00ms'), "args = ('܁𰤬', '\\U00010aa8𰤬', LCSStr...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [... execute_once source identical to the first traceback above ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='܁𰤬', right='\U00010aa8𰤬', alg=LCSStr({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=LCSStr({'qval': 1, 'external': True}),
E       left='܁𰤬',
E       right='\U00010aa8𰤬',
E   )
E   Unreliable test timings! On an initial run, this test took 554.72ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.81 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky

_______________________ test_normalization_range[alg19] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '𠬁Õ'
alg = Postfix({'qval': 1, 'sim_test': })

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '𠬁Õ', Postfix({'qval': 1, 'sim_test': }))
kwargs = {}, initial_draws = 2, start = 11038426.521530444, result = None
finish = 11038427.006840147, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=485310)
current_deadline = datetime.timedelta(microseconds=250000)

    [... @proxies(self.test) wrapper source identical to the first traceback above ...]
E   hypothesis.errors.DeadlineExceeded: Test took 485.31ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 14 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 485.31ms, which exceeds the deadline of 200.00ms'), "args = ('', '𠬁Õ', Postfix({'qval': 1...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [... execute_once source identical to the first traceback above ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right='𠬁Õ', alg=Postfix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=Postfix({'qval': 1, 'sim_test': }),
E       left='',
E       right='𠬁Õ',
E   )
E   Unreliable test timings! On an initial run, this test took 485.31ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.53 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
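Note: a detail visible in the frames above: runtime is compared against current_deadline = datetime.timedelta(microseconds=250000) even though settings.deadline is 200 ms. That is the 25% grace Hypothesis grants on non-final runs, computed by the (current_deadline // 4) * 5 line in the execute_once source reproduced earlier:

    from datetime import timedelta

    deadline = timedelta(milliseconds=200)   # settings.deadline in this run
    current_deadline = (deadline // 4) * 5   # 25% slack applied on non-final runs
    assert current_deadline == timedelta(microseconds=250000)

Only the final run enforces the raw 200 ms deadline; since every rerun here finishes in a few milliseconds at most, the final run passes and the mismatch is reported as Flaky.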
________________________ test_normalization_range[alg9] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

alg = RatcliffObershelp({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.24 seconds (0 invalid ones and 1 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_common.py:50: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(206324495051762869048868554517741753114) to this test or run pytest with --hypothesis-seed=206324495051762869048868554517741753114 to reproduce this failure.

________________________ test_normalization_same[alg21] ________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

text = '¸\x86í\U00099e18á\U00098a65'
alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(text=hypothesis.strategies.text())

tests/test_common.py:71:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('¸\x86í\U00099e18á\U00098a65', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}))
kwargs = {}, initial_draws = 1, start = 11038447.28925064, result = None
finish = 11038447.810560744, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=521310)
current_deadline = datetime.timedelta(microseconds=250000)

    [... @proxies(self.test) wrapper source identical to the first traceback above ...]
E   hypothesis.errors.DeadlineExceeded: Test took 521.31ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 33 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 521.31ms, which exceeds the deadline of 200.00ms'), "args = ('¸\\x86í\\U00099e18á\\U00098...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [... execute_once source identical to the first traceback above ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_same(text='¸\x86í\U00099e18á\U00098a65', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_same(
E       alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}),
E       text='¸\x86í\U00099e18á\U00098a65',
E   )
E   Unreliable test timings! On an initial run, this test took 521.31ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 4.01 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky

_______________________ test_normalization_by_one[alg19] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = '𐬰', right = ''
alg = Postfix({'qval': 1, 'sim_test': })

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('𐬰', '', Postfix({'qval': 1, 'sim_test': }))
kwargs = {}, initial_draws = 2, start = 11038438.6839557, result = None
finish = 11038439.008178603, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=324223)
current_deadline = datetime.timedelta(microseconds=250000)

    [... @proxies(self.test) wrapper source identical to the first traceback above ...]
E   hypothesis.errors.DeadlineExceeded: Test took 324.22ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 7 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 324.22ms, which exceeds the deadline of 200.00ms'), "args = ('𐬰', '', Postfix({'qval': 1,...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [... execute_once source identical to the first traceback above ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='𐬰', right='', alg=Postfix({'qval': 1, 'sim_test': })) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_by_one(
E       alg=Postfix({'qval': 1, 'sim_test': }),
E       left='𐬰',
E       right='',
E   )
E   Unreliable test timings! On an initial run, this test took 324.22ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.54 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
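Note: with this many parametrizations failing the same way, per-test decorators scale poorly in a build root. A package build could instead carry a conftest.py patch registering a build-wide settings profile; the profile name "rpmbuild" below is an assumption, while register_profile/load_profile are standard Hypothesis API:

    # conftest.py (hypothetical downstream patch)
    import os

    from hypothesis import HealthCheck, settings

    # No wall-clock deadline and no too_slow health check inside the
    # heavily loaded mock chroot.
    settings.register_profile(
        'rpmbuild',
        deadline=None,
        suppress_health_check=[HealthCheck.too_slow],
    )

    # Default to the build profile, but allow HYPOTHESIS_PROFILE to override.
    settings.load_profile(os.environ.get('HYPOTHESIS_PROFILE', 'rpmbuild'))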
_______________________ test_normalization_by_one[alg9] ________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '\U000922c5\U000922c5\U0010d33f', right = 'È>Óú'
alg = RatcliffObershelp({'qval': 1, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U000922c5\U000922c5\U0010d33f', 'È>Óú', RatcliffObershelp({'qval': 1, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038473.65989686, result = None
finish = 11038474.032085264, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=372188)
current_deadline = datetime.timedelta(microseconds=250000)

    [... @proxies(self.test) wrapper source identical to the first traceback above ...]
E   hypothesis.errors.DeadlineExceeded: Test took 372.19ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 47 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 372.19ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000922c5\\U000922c5\\U00...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [... execute_once source identical to the first traceback above ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U000922c5\U000922c5\U0010d33f', right='È>Óú', alg=RatcliffObershelp({'qval': 1, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_by_one(
E       alg=RatcliffObershelp({'qval': 1, 'external': True}),
E       left='\U000922c5\U000922c5\U0010d33f',
E       right='È>Óú',
E   )
E   Unreliable test timings! On an initial run, this test took 372.19ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.97 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky

_______________________ test_normalization_range[alg10] ________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '\U00034a0a\x84'
alg = Jaccard({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\U00034a0a\x84', Jaccard({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038495.700042961, result = None
finish = 11038496.212159265, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=512116)
current_deadline = datetime.timedelta(microseconds=250000)

    [... @proxies(self.test) wrapper source identical to the first traceback above ...]
E   hypothesis.errors.DeadlineExceeded: Test took 512.12ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 12 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 512.12ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\U00034a0a\\x84', Jacc...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [... execute_once source identical to the first traceback above ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right='\U00034a0a\x84', alg=Jaccard({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=Jaccard({'qval': 1, 'as_set': False, 'external': True}),
E       left='',
E       right='\U00034a0a\x84',
E   )
E   Unreliable test timings! On an initial run, this test took 512.12ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.26 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky

_______________________ test_normalization_range[alg21] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '\U0005e160ä÷'
alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\U0005e160ä÷', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038506.699229913, result = None
finish = 11038507.208182316, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=508952)
current_deadline = datetime.timedelta(microseconds=250000)

    [... @proxies(self.test) wrapper source identical to the first traceback above ...]
E   hypothesis.errors.DeadlineExceeded: Test took 508.95ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 17 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 508.95ms, which exceeds the deadline of 200.00ms'), "args = ('', '\\U0005e160ä÷', Needlem...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [... execute_once source identical to the first traceback above ...]
E   hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='', right='\U0005e160ä÷', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: test_normalization_range(
E       alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}),
E       left='',
E       right='\U0005e160ä÷',
E   )
E   Unreliable test timings! On an initial run, this test took 508.95ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.63 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky

_______________________ test_normalization_by_one[alg10] _______________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '\U0008e304D\x83\x02\U00064108', right = '\x8e\U00070249n\U00094818'
alg = Jaccard({'qval': 1, 'as_set': False, 'external': True})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U0008e304D\x83\x02\U00064108', '\x8e\U00070249n\U00094818', Jaccard({'qval': 1, 'as_set': False, 'external': True}))
kwargs = {}, initial_draws = 2, start = 11038503.489023896, result = None
finish = 11038503.8529895, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=363966)
current_deadline = datetime.timedelta(microseconds=250000)

    [... @proxies(self.test) wrapper source identical to the first traceback above ...]
E   hypothesis.errors.DeadlineExceeded: Test took 363.97ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = data = ConjectureData(VALID, 49 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 363.97ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0008e304D\\x83\\x02\\U00...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [... execute_once source identical to the first traceback above ...]
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U0008e304D\x83\x02\U00064108', right='\x8e\U00070249n\U00094818', alg=Jaccard({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Jaccard({'qval': 1, 'as_set': False, 'external': True}), E left='\U0008e304D\x83\x02\U00064108', E right='\x8e\U00070249n\U00094818', E ) E Unreliable test timings! On an initial run, this test took 363.97ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.70 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg21] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = 'ÒC', right = '8C¶>\x04' alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('ÒC', '8C¶>\x04', NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 11038517.27683036, result = None finish = 11038518.610980267, internal_draw_time = 0 runtime = datetime.timedelta(seconds=1, microseconds=334150) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 1334.15ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 34 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 1334.15ms, which exceeds the deadline of 200.00ms'), "args = ('ÒC', '8C¶>\\x04', Needlema...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. 
If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='ÒC', right='8C¶>\x04', alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=NeedlemanWunsch({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='ÒC', E right='8C¶>\x04', E ) E Unreliable test timings! On an initial run, this test took 1334.15ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.58 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg22] ________________________ [gw1] linux -- Python 3.12.0 /usr/bin/python3 left = 'dó', right = '\U000cc581PСé' alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('dó', '\U000cc581PСé', SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 11038527.693138007, result = None finish = 11038528.815126015, internal_draw_time = 0 runtime = datetime.timedelta(seconds=1, microseconds=121988) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 1121.99ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 39 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 1121.99ms, which exceeds the deadline of 200.00ms'), "args = ('dó', '\\U000cc581PСé', Sm...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. 
If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. 
Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='dó', right='\U000cc581PСé', alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='dó', E right='\U000cc581PСé', E ) E Unreliable test timings! On an initial run, this test took 1121.99ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.75 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________________ test_qval[2-JaroWinkler] ___________________________ [gw5] linux -- Python 3.12.0 /usr/bin/python3 left = '0', right = '0', alg = 'JaroWinkler', qval = 2 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('0', '0', 'JaroWinkler', 2), kwargs = {}, initial_draws = 2 start = 11038526.931710504, result = None, finish = 11038537.493056951 internal_draw_time = 0 runtime = datetime.timedelta(seconds=10, microseconds=561346) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 10561.35ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = 'JaroWinkler', qval = 2 @pytest.mark.external > @pytest.mark.parametrize('alg', libraries.get_algorithms()) tests/test_external.py:51: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 8 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 10561.35ms, which exceeds the deadline of 200.00ms'), "args = ('0', '0', 'JaroWinkler', 2...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_qval(left='0', right='0', alg='JaroWinkler', qval=2) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_qval( E alg='JaroWinkler', E qval=2, E left='0', E right='0', E ) E Unreliable test timings! 
On an initial run, this test took 10561.35ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.76 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_range[alg11] ________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = '爨\U000761df!鑖Ã', right = '' alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('爨\U000761df!鑖Ã', '', Sorensen({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038538.081463857, result = None finish = 11038538.409029355, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=327565) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 327.56ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:50: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 45 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 327.56ms, which exceeds the deadline of 200.00ms'), "args = ('爨\\U000761df!鑖Ã', '', Soren...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
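The 250 ms figure in the locals above, against the configured deadline of 200.00ms, comes from the grace margin in the wrapper shown throughout these tracebacks: non-final runs have their deadline scaled by 5/4 through timedelta floor division. A minimal sketch of that arithmetic (the 200 ms value is Hypothesis's stock deadline; the variable names mirror the traceback and nothing here is package code):

    import datetime

    deadline = datetime.timedelta(milliseconds=200)  # settings.deadline
    # Non-final runs are allowed 25% extra: (deadline // 4) * 5
    current_deadline = (deadline // 4) * 5
    print(repr(current_deadline))  # datetime.timedelta(microseconds=250000)

So during generation a property only trips DeadlineExceeded past 250 ms, while the final replay is held to the plain 200 ms deadline.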
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='爨\U000761df!鑖Ã', right='', alg=Sorensen({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E alg=Sorensen({'qval': 1, 'as_set': False, 'external': True}), E left='爨\U000761df!鑖Ã', E right='', E ) E Unreliable test timings! 
On an initial run, this test took 327.56ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.42 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg22] _______________________ [gw3] linux -- Python 3.12.0 /usr/bin/python3 left = '\U0006a18bs', right = '¤íU' alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\U0006a18bs', '¤íU', SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) kwargs = {}, initial_draws = 2, start = 11038552.08964192, result = None finish = 11038552.614074621, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=524433) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 524.43ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 30 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 524.43ms, which exceeds the deadline of 200.00ms'), "args = ('\\U0006a18bs', '¤íU', Smith...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
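The remedy the report itself suggests is per-test: stack hypothesis.settings(deadline=None) with the existing given decorator. A sketch of how a property in tests/test_common.py could be exempted (the empty ALGS list and the ellipsis body are stand-ins, not the package's actual test code):

    import hypothesis
    import hypothesis.strategies
    import pytest

    ALGS = []  # stand-in for the algorithm instances parametrized in tests/test_common.py

    @pytest.mark.parametrize('alg', ALGS)
    @hypothesis.settings(deadline=None)  # opt this one property out of the wall-clock deadline
    @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
    def test_normalization_by_one(left, right, alg):
        ...  # property body unchanged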
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\U0006a18bs', right='¤íU', alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=SmithWaterman({'qval': 1, 'gap_cost': 1.0, 'sim_func': , 'external': True}), E left='\U0006a18bs', E right='¤íU', E ) E Unreliable test timings! 
On an initial run, this test took 524.43ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.68 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _______________________ test_normalization_by_one[alg11] _______________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 left = '\x91\x164m', right = '?\x85' alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('\x91\x164m', '?\x85', Sorensen({'qval': 1, 'as_set': False, 'external': True})) kwargs = {}, initial_draws = 2, start = 11038569.213157298, result = None finish = 11038569.5930977, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=379940) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 379.94ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = Sorensen({'qval': 1, 'as_set': False, 'external': True}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_common.py:60: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 33 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 379.94ms, which exceeds the deadline of 200.00ms'), "args = ('\\x91\\x164m', '?\\x85', So...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_by_one(left='\x91\x164m', right='?\x85', alg=Sorensen({'qval': 1, 'as_set': False, 'external': True})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_by_one( E alg=Sorensen({'qval': 1, 'as_set': False, 'external': True}), E left='\x91\x164m', E right='?\x85', E ) E Unreliable test timings! 
On an initial run, this test took 379.94ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.48 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg6] ______________________________ [gw2] linux -- Python 3.12.0 /usr/bin/python3 left = '', right = '', alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '', EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})), kwargs = {} initial_draws = 2, start = 11038582.72831256, result = None finish = 11038583.209646363, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=481334) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 481.33ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 481.33ms, which exceeds the deadline of 200.00ms'), "args = ('', '', EntropyNCD({'qval': ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}), E left='', E right='', E ) E Unreliable test timings! On an initial run, this test took 481.33ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.11 ms, which did not. 
If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _____________________________ test_simmetry[alg0] ______________________________ [gw0] linux -- Python 3.12.0 /usr/bin/python3 left = '', right = '' alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = ('', '', ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) kwargs = {}, initial_draws = 2, start = 11038567.115864588, result = None finish = 11038567.617354292, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=501490) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 501.49ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1}) @pytest.mark.parametrize('alg', ALGS) > @hypothesis.given( left=hypothesis.strategies.text(), right=hypothesis.strategies.text(), ) tests/test_compression/test_common.py:36: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 3 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 501.49ms, which exceeds the deadline of 200.00ms'), "args = ('', '', ArithNCD({'base': 2,...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n") example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
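The Flaky errors above all follow one pattern: the first execution of an example exceeds the deadline, Hypothesis replays the shrunk example expecting the same DeadlineExceeded, the replay finishes quickly, and execute_once converts the mismatch into Flaky. A toy reproduction under the default 200 ms deadline (not package code; the module-level flag just makes the first call artificially slow):

    import time
    from hypothesis import given, strategies as st

    _first_call = True

    @given(st.text())
    def test_slow_once(s):
        global _first_call
        if _first_call:
            _first_call = False
            time.sleep(0.5)  # only the first call blows the 200 ms deadline

    # Running this raises hypothesis.errors.Flaky: falsified on the first
    # call (DeadlineExceeded) but not on a subsequent one.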
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_simmetry( E alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}), E left='', E right='', E ) E Unreliable test timings! On an initial run, this test took 501.49ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 17.16 ms, which did not. 
_______________________ test_normalization_by_one[alg23] _______________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'E', 'D', 'M', 'T', 'K', 'N', 'C', 'B', 'Y', 'S', 'I', 'Q', 'P', 'F', 'X', 'G', 'A', 'R', 'L', 'U', 'J', 'O', 'Z', 'V'})})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('𡜂¦º㣧¿\U000a2b08\U0009bfd2 \x15@\x108¹\x16.2pD', '\x96\U000352dc\U0003bf86\x01\U000bb1f0', Editex({'match_cost': 0, '... 'D', 'M', 'T', 'K', 'N', 'C', 'B', 'Y', 'S', 'I', 'Q', 'P', 'F', 'X', 'G', 'A', 'R', 'L', 'U', 'J', 'O', 'Z', 'V'})}))
kwargs = {}, initial_draws = 2, start = 11038605.882461665, result = None
finish = 11038606.424690068, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=542228)
current_deadline = timedelta(milliseconds=200)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 542.23ms, which exceeds the deadline of 200.00ms
E           Falsifying example: test_normalization_by_one(
E               alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'E', 'D', 'M', 'T', 'K', 'N', 'C', 'B', 'Y', 'S', 'I', 'Q', 'P', 'F', 'X', 'G', 'A', 'R', 'L', 'U', 'J', 'O', 'Z', 'V'})}),
E               left='𡜂¦º㣧¿\U000a2b08\U0009bfd2 \x15@\x108¹\x16.2pD',
E               right='\x96\U000352dc\U0003bf86\x01\U000bb1f0',
E           )

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
_____________________________ test_qval[None-Jaro] _____________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

left = '\x8a\x0f3\U000cb38d³\x13p', right = 'þ\U0010c1ae', alg = 'Jaro'
qval = None

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('\x8a\x0f3\U000cb38d³\x13p', 'þ\U0010c1ae', 'Jaro', None), kwargs = {}
initial_draws = 2, start = 11038578.48795204, result = None
finish = 11038579.012133343, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=524181)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 524.18ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <...>, data = ConjectureData(VALID, 45 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 524.18ms, which exceeds the deadline of 200.00ms'), "args = ('\\x8a\\x0f3\\U000cb38d³\\x1...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self, data, print_example=False, is_final=False,
        expected_failure=None, example_kwargs=None,
    ):
        ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_qval(left='\x8a\x0f3\U000cb38d³\x13p', right='þ\U0010c1ae', alg='Jaro', qval=None) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_qval(
E               alg='Jaro',
E               qval=None,
E               left='\x8a\x0f3\U000cb38d³\x13p',
E               right='þ\U0010c1ae',
E           )
E           Unreliable test timings! On an initial run, this test took 524.18ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.81 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_qval[None-JaroWinkler] __________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

alg = 'JaroWinkler', qval = None

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.29 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_external.py:51: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(116207417766110363442113054527121259169) to this test or run pytest with --hypothesis-seed=116207417766110363442113054527121259169 to reproduce this failure.
______________________________ test_qval[1-Jaro] _______________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

alg = 'Jaro', qval = 1

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.13 seconds (0 invalid ones and 1 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_external.py:51: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(164184391522299064642317228823435159604) to this test or run pytest with --hypothesis-seed=164184391522299064642317228823435159604 to reproduce this failure.
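For the FailedHealthCheck reports, the message itself names the narrower knob: suppressing only HealthCheck.too_slow rather than disabling health checks wholesale. A minimal sketch, again with a hypothetical test; capping the generated size with max_size is the alternative the message also mentions:

    import hypothesis
    from hypothesis import HealthCheck, strategies as st

    # Sketch: silence only the too_slow health check for one test;
    # all other health checks stay active.
    @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
    @hypothesis.given(text=st.text(max_size=64))
    def test_text_is_str(text):  # hypothetical test, not from this suite
        assert isinstance(text, str)
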
______________________________ test_qval[3-Jaro] _______________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

left = 'x', right = '\U0004596cm', alg = 'Jaro', qval = 3

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('x', '\U0004596cm', 'Jaro', 3), kwargs = {}, initial_draws = 2
start = 11038603.291689454, result = None, finish = 11038603.613171857
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=321482)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 321.48ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <...>, data = ConjectureData(VALID, 16 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 321.48ms, which exceeds the deadline of 200.00ms'), "args = ('x', '\\U0004596cm', 'Jaro',...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self, data, print_example=False, is_final=False,
        expected_failure=None, example_kwargs=None,
    ):
        ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_qval(left='x', right='\U0004596cm', alg='Jaro', qval=3) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_qval(
E               alg='Jaro',
E               qval=3,
E               left='x',
E               right='\U0004596cm',
E           )
E           Unreliable test timings! On an initial run, this test took 321.48ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.21 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_qval[3-JaroWinkler] ___________________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

alg = 'JaroWinkler', qval = 3

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_external.py:51: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(4378754298315023829739992728254182654) to this test or run pytest with --hypothesis-seed=4378754298315023829739992728254182654 to reproduce this failure.
_______________________ test_normalization_range[alg23] ________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '61␁𠧌^', right = '?\U0008fbe9'
alg = Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'Z', 'Q', 'O', 'F', 'L', 'U', 'S', 'R', 'B', 'N', 'A', 'J', 'C', 'E', 'P', 'M', 'T', 'Y', 'D', 'X', 'I', 'K', 'V', 'G'})})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_common.py:50:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('61␁𠧌^', '?\U0008fbe9', Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': Tru... 'Q', 'O', 'F', 'L', 'U', 'S', 'R', 'B', 'N', 'A', 'J', 'C', 'E', 'P', 'M', 'T', 'Y', 'D', 'X', 'I', 'K', 'V', 'G'})}))
kwargs = {}, initial_draws = 2, start = 11038576.398884732, result = None
finish = 11038577.413882436, internal_draw_time = 0
runtime = datetime.timedelta(seconds=1, microseconds=14998)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 1015.00ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <...>, data = ConjectureData(VALID, 48 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 1015.00ms, which exceeds the deadline of 200.00ms'), "args = ('61␁𠧌^', '?\\U0008fbe9', Ed...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self, data, print_example=False, is_final=False,
        expected_failure=None, example_kwargs=None,
    ):
        ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(left='61␁𠧌^', right='?\U0008fbe9', alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'Z', 'Q', 'O', 'F', 'L', 'U', 'S', 'R', 'B', 'N', 'A', 'J', 'C', 'E', 'P', 'M', 'T', 'Y', 'D', 'X', 'I', 'K', 'V', 'G'})})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_range(
E               alg=Editex({'match_cost': 0, 'group_cost': 1, 'mismatch_cost': 2, 'local': False, 'external': True, 'grouped': frozenset({'Z', 'Q', 'O', 'F', 'L', 'U', 'S', 'R', 'B', 'N', 'A', 'J', 'C', 'E', 'P', 'M', 'T', 'Y', 'D', 'X', 'I', 'K', 'V', 'G'})}),
E               left='61␁𠧌^',
E               right='?\U0008fbe9',
E           )
E           Unreliable test timings! On an initial run, this test took 1015.00ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 7.21 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
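test_normalization_range took 1015.00ms on its first run, so a middle course between the 200ms default and deadline=None is to raise the deadline past the worst observed runtime; a genuine hang would then still fail. A sketch, with a hypothetical stand-in test and a 2-second limit chosen from this log's worst case:

    import datetime
    import hypothesis
    from hypothesis import strategies as st

    # Sketch: keep a deadline, but set it above the ~1s worst case seen
    # in this log instead of disabling the check entirely.
    @hypothesis.settings(deadline=datetime.timedelta(seconds=2))
    @hypothesis.given(left=st.text(), right=st.text())
    def test_normalization_range_like(left, right):  # hypothetical stand-in
        assert isinstance(left + right, str)
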
_____________________________ test_simmetry[alg1] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = '', alg = BWTRLENCD({'terminator': '\x00'})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('0', '', BWTRLENCD({'terminator': '\x00'})), kwargs = {}
initial_draws = 2, start = 11038604.65757276, result = None
finish = 11038605.220548164, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=562975)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 562.98ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <...>, data = ConjectureData(VALID, 5 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 562.98ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', BWTRLENCD({'termina...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self, data, print_example=False, is_final=False,
        expected_failure=None, example_kwargs=None,
    ):
        ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_simmetry(left='0', right='', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_simmetry(
E               alg=BWTRLENCD({'terminator': '\x00'}),
E               left='0',
E               right='',
E           )
E           Unreliable test timings! On an initial run, this test took 562.98ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 8.46 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_simmetry_compressor ___________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

text = '𗱦¥²\x1b\U000f2935ü®Ð\\\U000e9752¸¥\U00106fc7鏪\x06ûxú'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_simmetry_compressor(text):

tests/test_compression/test_sqrt_ncd.py:25:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('𗱦¥²\x1b\U000f2935ü®Ð\\\U000e9752¸¥\U00106fc7鏪\x06ûxú',), kwargs = {}
initial_draws = 1, start = 11038618.368427023, result = None
finish = 11038618.814044723, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=445618)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 445.62ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <...>, data = ConjectureData(VALID, 107 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 445.62ms, which exceeds the deadline of 200.00ms'), "args = ('𗱦¥²\\x1b\\U000f2935ü®Ð\\\\\...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self, data, print_example=False, is_final=False,
        expected_failure=None, example_kwargs=None,
    ):
        ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_simmetry_compressor(text='𗱦¥²\x1b\U000f2935ü®Ð\\\U000e9752¸¥\U00106fc7鏪\x06ûxú') produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_simmetry_compressor(
E               text='𗱦¥²\x1b\U000f2935ü®Ð\\\U000e9752¸¥\U00106fc7鏪\x06ûxú',
E           )
E           Unreliable test timings! On an initial run, this test took 445.62ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.53 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_is_normalized[alg0] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '0'
alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('', '0', ArithNCD({'base': 2, 'terminator': None, 'qval': 1}))
kwargs = {}, initial_draws = 2, start = 11038604.68195416, result = None
finish = 11038605.218901765, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=536948)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 536.95ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <...>, data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 536.95ms, which exceeds the deadline of 200.00ms'), "args = ('', '0', ArithNCD({'base': 2...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self, data, print_example=False, is_final=False,
        expected_failure=None, example_kwargs=None,
    ):
        ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='0', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_is_normalized(
E               alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}),
E               left='',
E               right='0',
E           )
E           Unreliable test timings! On an initial run, this test took 536.95ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 9.90 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_normalized_by_one[alg4] _________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = '', alg = ZLIBNCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('0', '', ZLIBNCD({})), kwargs = {}, initial_draws = 2
start = 11038632.259189688, result = None, finish = 11038632.667448187
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=408258)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 408.26ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <...>, data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 408.26ms, which exceeds the deadline of 200.00ms'), "args = ('0', '', ZLIBNCD({})), kwarg...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self, data, print_example=False, is_final=False,
        expected_failure=None, example_kwargs=None,
    ):
        ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='0', right='', alg=ZLIBNCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalized_by_one(
E               alg=ZLIBNCD({}),
E               left='0',
E               right='',
E           )
E           Unreliable test timings! On an initial run, this test took 408.26ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.87 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_idempotency_compressor __________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

text = '0'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_idempotency_compressor(text):

tests/test_compression/test_sqrt_ncd.py:31:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ('0',), kwargs = {}, initial_draws = 1, start = 11038640.312020523
result = None, finish = 11038640.623681525, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=311661)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 311.66ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <...>, data = ConjectureData(VALID, 5 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 311.66ms, which exceeds the deadline of 200.00ms'), "args = ('0',), kwargs = {}, initial_...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    def execute_once(
        self, data, print_example=False, is_final=False,
        expected_failure=None, example_kwargs=None,
    ):
        ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_idempotency_compressor(text='0') produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_idempotency_compressor(
E               text='0',
E           )
E           Unreliable test timings! On an initial run, this test took 311.66ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.34 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_normalized_by_one[alg5] _________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

alg = SqrtNCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.85 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_compression/test_common.py:60: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(296437173495695685696792781872404175697) to this test or run pytest with --hypothesis-seed=296437173495695685696792781872404175697 to reproduce this failure.
_________________________ test_normalized_by_one[alg5] _________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

alg = SqrtNCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.85 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_compression/test_common.py:60: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(296437173495695685696792781872404175697) to this test or run pytest with --hypothesis-seed=296437173495695685696792781872404175697 to reproduce this failure.
___________________________ test_qval[1-JaroWinkler] ___________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

left = '\x0c', right = '0', alg = 'JaroWinkler', qval = 1

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\x0c', '0', 'JaroWinkler', 1), kwargs = {}, initial_draws = 2
start = 11038665.52028294, result = None, finish = 11038666.014302243
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=494019)
current_deadline = datetime.timedelta(microseconds=250000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 494.02ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <hypothesis.core.StateForActualGivenExecution object>
data = ConjectureData(VALID, 8 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 494.02ms, which exceeds the deadline of 200.00ms'), "args = ('\\x0c', '0', 'JaroWinkler',...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [execute_once source identical to the first traceback above; elided]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_qval(left='\x0c', right='0', alg='JaroWinkler', qval=1) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_qval(
E               alg='JaroWinkler',
E               qval=1,
E               left='\x0c',
E               right='0',
E           )
E           Unreliable test timings! On an initial run, this test took 494.02ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.30 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_monotonicity_compressor _________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

left = '𐳈', right = '3'

    @hypothesis.given(
>       left=hypothesis.strategies.text(min_size=1),
        right=hypothesis.strategies.characters(),
    )

tests/test_compression/test_sqrt_ncd.py:38:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('𐳈', '3'), kwargs = {}, initial_draws = 2, start = 11038671.104724564
result = None, finish = 11038671.614526568, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=509802)
current_deadline = datetime.timedelta(microseconds=250000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 509.80ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <hypothesis.core.StateForActualGivenExecution object>
data = ConjectureData(VALID, 9 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 509.80ms, which exceeds the deadline of 200.00ms'), "args = ('𐳈', '3'), kwargs = {}, init...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [execute_once source identical to the first traceback above; elided]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_monotonicity_compressor(left='𐳈', right='3') produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_monotonicity_compressor(
E               left='𐳈',
E               right='3',
E           )
E           Unreliable test timings! On an initial run, this test took 509.80ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.32 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________________ test_simmetry[alg2] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '0', BZ2NCD({})), kwargs = {}, initial_draws = 2
start = 11038694.03391517, result = None, finish = 11038694.838089371
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=804174)
current_deadline = datetime.timedelta(microseconds=200000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 804.17ms, which exceeds the deadline of 200.00ms
E           Falsifying example: test_simmetry(
E               alg=BZ2NCD({}),
E               left='',
E               right='0',
E           )

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
_____________________________ test_simmetry[alg3] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.22 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_compression/test_common.py:36: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(307613202047923366661856420025742026514) to this test or run pytest with --hypothesis-seed=307613202047923366661856420025742026514 to reproduce this failure.
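Where only the too_slow health check fires, as in the two FailedHealthCheck blocks above, the message suggests suppressing that single check rather than the deadline. A sketch under the same assumptions as before (decorators shortened, test body elided; the real test is also parametrized over ALGS):

import hypothesis
import hypothesis.strategies

# Keep the deadline, but tell Hypothesis that slow data generation is expected.
@hypothesis.settings(suppress_health_check=[hypothesis.HealthCheck.too_slow])
@hypothesis.given(
    left=hypothesis.strategies.text(),
    right=hypothesis.strategies.text(),
)
def test_simmetry(left, right):
    ...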
______________________________ test_qval[2-Jaro] _______________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

left = '0', right = '0', alg = 'Jaro', qval = 2

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('0', '0', 'Jaro', 2), kwargs = {}, initial_draws = 2
start = 11038691.68067016, result = None, finish = 11038692.21364416
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=532974)
current_deadline = datetime.timedelta(microseconds=250000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 532.97ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <hypothesis.core.StateForActualGivenExecution object>
data = ConjectureData(VALID, 9 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 532.97ms, which exceeds the deadline of 200.00ms'), "args = ('0', '0', 'Jaro', 2), kwargs...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [execute_once source identical to the first traceback above; elided]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_qval(left='0', right='0', alg='Jaro', qval=2) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_qval(
E               alg='Jaro',
E               qval=2,
E               left='0',
E               right='0',
E           )
E           Unreliable test timings! On an initial run, this test took 532.97ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.92 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_is_normalized[alg1] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = 'h', alg = BWTRLENCD({'terminator': '\x00'})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', 'h', BWTRLENCD({'terminator': '\x00'})), kwargs = {}
initial_draws = 2, start = 11038689.68950505, result = None
finish = 11038690.827167155, internal_draw_time = 0
runtime = datetime.timedelta(seconds=1, microseconds=137662)
current_deadline = datetime.timedelta(microseconds=250000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 1137.66ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <hypothesis.core.StateForActualGivenExecution object>
data = ConjectureData(VALID, 6 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 1137.66ms, which exceeds the deadline of 200.00ms'), "args = ('', 'h', BWTRLENCD({'termin...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [execute_once source identical to the first traceback above; elided]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='h', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_is_normalized(
E               alg=BWTRLENCD({'terminator': '\x00'}),
E               left='',
E               right='h',
E           )
E           Unreliable test timings! On an initial run, this test took 1137.66ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 188.21 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_normalized_by_one[alg6] _________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

left = '\U000b8375\U0005ce1bÔ\U000823d7\x1c', right = '\x1bþ\x9d2]eÁ.'
alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\U000b8375\U0005ce1bÔ\U000823d7\x1c', '\x1bþ\x9d2]eÁ.', EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}))
kwargs = {}, initial_draws = 2, start = 11038678.6454843, result = None
finish = 11038679.0178582, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=372374)
current_deadline = datetime.timedelta(microseconds=250000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 372.37ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <hypothesis.core.StateForActualGivenExecution object>
data = ConjectureData(VALID, 75 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 372.37ms, which exceeds the deadline of 200.00ms'), "args = ('\\U000b8375\\U0005ce1bÔ\\U0...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [execute_once source identical to the first traceback above; elided]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='\U000b8375\U0005ce1bÔ\U000823d7\x1c', right='\x1bþ\x9d2]eÁ.', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalized_by_one(
E               alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}),
E               left='\U000b8375\U0005ce1bÔ\U000823d7\x1c',
E               right='\x1bþ\x9d2]eÁ.',
E           )
E           Unreliable test timings! On an initial run, this test took 372.37ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.85 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
__________________________ test_compare_with_tversky ___________________________
[gw4] linux -- Python 3.12.0 /usr/bin/python3

    @hypothesis.given(
>       left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.07 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_token/test_jaccard.py:29: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(185518209100360811422388986057517212726) to this test or run pytest with --hypothesis-seed=185518209100360811422388986057517212726 to reproduce this failure.
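Both reproduction routes named in the banner above work equivalently; the seed below is the one printed for test_compare_with_tversky, and the test body is elided. A sketch:

import hypothesis
import hypothesis.strategies

# In code: pin the generation seed on the test itself...
@hypothesis.seed(185518209100360811422388986057517212726)
@hypothesis.given(left=hypothesis.strategies.text(), right=hypothesis.strategies.text())
def test_compare_with_tversky(left, right):
    ...

# ...or from the command line, without touching the source:
#   pytest --hypothesis-seed=185518209100360811422388986057517212726 tests/test_token/test_jaccard.py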
______________________ test_list_of_numbers[JaroWinkler] _______________________
[gw5] linux -- Python 3.12.0 /usr/bin/python3

left = [-22752, -18], right = [12910, -272206610, 17357], alg = 'JaroWinkler'

    @pytest.mark.external
>   @pytest.mark.parametrize('alg', libraries.get_algorithms())

tests/test_external.py:91:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ([-22752, -18], [12910, -272206610, 17357], 'JaroWinkler'), kwargs = {}
initial_draws = 2, start = 11038714.473229464, result = None
finish = 11038714.869897565, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=396668)
current_deadline = datetime.timedelta(microseconds=250000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 396.67ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <hypothesis.core.StateForActualGivenExecution object>
data = ConjectureData(VALID, 37 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 396.67ms, which exceeds the deadline of 200.00ms'), "args = ([-22752, -18], [12910, -2722...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [execute_once source identical to the first traceback above; elided]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_list_of_numbers(left=[-22752, -18], right=[12910, -272206610, 17357], alg='JaroWinkler') produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_list_of_numbers(
E               alg='JaroWinkler',
E               left=[-22752, -18],
E               right=[12910, -272206610, 17357],
E           )
E           Unreliable test timings! On an initial run, this test took 396.67ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.47 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________________ test_simmetry[alg4] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

alg = ZLIBNCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', 'u\x97?¬í', ZLIBNCD({})), kwargs = {}, initial_draws = 2
start = 11038741.331173787, result = None, finish = 11038741.827004788
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=495831)
current_deadline = datetime.timedelta(microseconds=200000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 495.83ms, which exceeds the deadline of 200.00ms
E           Falsifying example: test_simmetry(
E               alg=ZLIBNCD({}),
E               left='',
E               right='u\x97?¬í',
E           )

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
___________________________ test_is_normalized[alg2] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '', alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '', BZ2NCD({})), kwargs = {}, initial_draws = 2
start = 11038717.328091675, result = None, finish = 11038718.22077328
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=892682)
current_deadline = datetime.timedelta(microseconds=250000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 892.68ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <hypothesis.core.StateForActualGivenExecution object>
data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 892.68ms, which exceeds the deadline of 200.00ms'), "args = ('', '', BZ2NCD({})), kwargs ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [execute_once source identical to the first traceback above; elided]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='', alg=BZ2NCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_is_normalized(
E               alg=BZ2NCD({}),
E               left='',
E               right='',
E           )
E           Unreliable test timings! On an initial run, this test took 892.68ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 25.13 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
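Since the same two failure modes (DeadlineExceeded/Flaky and the too_slow health check) recur across many tests in this run, a packager could relax both in one place instead of per test. A sketch of a Hypothesis settings profile, assuming a tests/conftest.py; this is not something the spec above actually does:

import hypothesis

# Register a profile for resource-constrained build machines.
hypothesis.settings.register_profile(
    "rpmbuild",
    deadline=None,
    suppress_health_check=[hypothesis.HealthCheck.too_slow],
)
# Activate it here, or select it per run via pytest's --hypothesis-profile option.
hypothesis.settings.load_profile("rpmbuild")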
_____________________________ test_simmetry[alg5] ______________________________
[gw0] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '', alg = SqrtNCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:36:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '', SqrtNCD({'qval': 1})), kwargs = {}, initial_draws = 2
start = 11038755.699354352, result = None, finish = 11038756.018285854
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=318932)
current_deadline = datetime.timedelta(microseconds=250000)

    [deadline-wrapper source identical to the first traceback above; elided]
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 318.93ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = <hypothesis.core.StateForActualGivenExecution object>
data = ConjectureData(VALID, 4 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 318.93ms, which exceeds the deadline of 200.00ms'), "args = ('', '', SqrtNCD({'qval': 1})...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None

    [execute_once source identical to the first traceback above; elided]
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis test_simmetry(left='', right='', alg=SqrtNCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_simmetry(
E               alg=SqrtNCD({'qval': 1}),
E               left='',
E               right='',
E           )
E           Unreliable test timings! On an initial run, this test took 318.93ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.87 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
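For context, test_simmetry checks that the distance does not depend on argument order. A hypothetical reconstruction of its shape (the body and the sqrt_ncd instance are assumptions, not the package's verbatim source; the real test iterates over the ALGS list seen in the locals above):

import hypothesis
import hypothesis.strategies
import textdistance

@hypothesis.given(
    left=hypothesis.strategies.text(),
    right=hypothesis.strategies.text(),
)
def test_simmetry(left, right):
    alg = textdistance.sqrt_ncd  # stands in for each entry of ALGS
    assert alg(left, right) == alg(right, left)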
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_compare_with_tversky_as_set(left='\ue530', right='\U00071004\x926') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_compare_with_tversky_as_set( E left='\ue530', E right='\U00071004\x926', E ) E Unreliable test timings! 
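The fix Hypothesis keeps suggesting is a per-test settings override. A minimal sketch of what that looks like, assuming the property under test is symmetry of the distance (the test body and the SqrtNCD usage here are illustrative assumptions, not the package's actual test code):

from hypothesis import given, settings, strategies as st
import textdistance

@settings(deadline=None)  # disable the per-example deadline for this test only
@given(left=st.text(), right=st.text())
def test_simmetry(left, right):
    alg = textdistance.SqrtNCD(qval=1)  # assumed constructor signature
    # symmetry: the distance must not depend on argument order
    assert alg(left, right) == alg(right, left)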
___________________________ test_is_normalized[alg3] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = 'ò\U000e3715f¼\U00061386ú!\U000daacfhã]', right = ''
alg = RLENCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('ò\U000e3715f¼\U00061386ú!\U000daacfhã]', '', RLENCD({'qval': 1}))
kwargs = {}, initial_draws = 2
runtime = datetime.timedelta(microseconds=354556)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 354.56ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(...) from exception
E           hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='ò\U000e3715f¼\U00061386ú!\U000daacfhã]', right='', alg=RLENCD({'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_is_normalized(
E               alg=RLENCD({'qval': 1}),
E               left='ò\U000e3715f¼\U00061386ú!\U000daacfhã]',
E               right='',
E           )
E           Unreliable test timings! On an initial run, this test took 354.56ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.88 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_idempotency_compressor __________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

text = '\x97\x97½\U0010dae4'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_idempotency_compressor(text):

tests/test_compression/test_entropy_ncd.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\x97\x97½\U0010dae4',), kwargs = {}, initial_draws = 1
runtime = datetime.timedelta(microseconds=546090)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 546.09ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(...) from exception
E           hypothesis.errors.Flaky: Hypothesis test_idempotency_compressor(text='\x97\x97½\U0010dae4') produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_idempotency_compressor(
E               text='\x97\x97½\U0010dae4',
E           )
E           Unreliable test timings! On an initial run, this test took 546.09ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.32 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_is_normalized[alg4] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '\U000451a3_\x05', alg = ZLIBNCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\U000451a3_\x05', ZLIBNCD({})), kwargs = {}, initial_draws = 2
runtime = datetime.timedelta(microseconds=329289)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 329.29ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(...) from exception
E           hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='\U000451a3_\x05', alg=ZLIBNCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_is_normalized(
E               alg=ZLIBNCD({}),
E               left='',
E               right='\U000451a3_\x05',
E           )
E           Unreliable test timings! On an initial run, this test took 329.29ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.26 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_normalization_range ___________________________
[gw3] linux -- Python 3.12.0 /usr/bin/python3

text = 'µ%\U000c73a4ȵ\U0010d441'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_normalization_range(text):

tests/test_compression/test_sqrt_ncd.py:59:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('µ%\U000c73a4ȵ\U0010d441',), kwargs = {}, initial_draws = 1
runtime = datetime.timedelta(microseconds=269084)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 269.08ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(...) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalization_range(text='µ%\U000c73a4ȵ\U0010d441') produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalization_range(
E               text='µ%\U000c73a4ȵ\U0010d441',
E           )
E           Unreliable test timings! On an initial run, this test took 269.08ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.17 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_is_normalized[alg5] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

alg = SqrtNCD({'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.38 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test_compression/test_common.py:48: FailedHealthCheck
---------------------------------- Hypothesis ----------------------------------
You can add @seed(241990973157636305197841381679435759510) to this test or run pytest with --hypothesis-seed=241990973157636305197841381679435759510 to reproduce this failure.
__________________________ test_compare_with_tversky ___________________________
[gw7] linux -- Python 3.12.0 /usr/bin/python3

left = 'ä\U000c9773¢Ó\U000c7b1f\U000b0004\x05÷\x81\U000c9773'
right = '\U0006e1bc'

    @hypothesis.given(
>       left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_token/test_sorensen.py:27:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('ä\U000c9773¢Ó\U000c7b1f\U000b0004\x05÷\x81\U000c9773', '\U0006e1bc')
kwargs = {}, initial_draws = 2
runtime = datetime.timedelta(microseconds=630765)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 630.76ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(...) from exception
E           hypothesis.errors.Flaky: Hypothesis test_compare_with_tversky(left='ä\U000c9773¢Ó\U000c7b1f\U000b0004\x05÷\x81\U000c9773', right='\U0006e1bc') produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_compare_with_tversky(
E               left='ä\U000c9773¢Ó\U000c7b1f\U000b0004\x05÷\x81\U000c9773',
E               right='\U0006e1bc',
E           )
E           Unreliable test timings! On an initial run, this test took 630.76ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.87 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
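The FailedHealthCheck above names its own two escape hatches, and both compose with the deadline override; the seed can equally be passed on the command line as pytest --hypothesis-seed=..., as the log itself notes. A sketch of the decorators only, with the property body unchanged and elided:

from hypothesis import HealthCheck, given, seed, settings, strategies as st

@seed(241990973157636305197841381679435759510)  # replay the exact failing run
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(left=st.text(), right=st.text())
def test_is_normalized(left, right):
    ...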
_______________________ test_compare_with_tversky_as_set _______________________
[gw6] linux -- Python 3.12.0 /usr/bin/python3

left = '쌆\x14\x0e', right = '\x0f\x873\U000a4252\x15É\x8eÙ'

    @hypothesis.given(
>       left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_token/test_jaccard.py:39:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('쌆\x14\x0e', '\x0f\x873\U000a4252\x15É\x8eÙ'), kwargs = {}
initial_draws = 2
runtime = datetime.timedelta(microseconds=502406)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 502.41ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(...) from exception
E           hypothesis.errors.Flaky: Hypothesis test_compare_with_tversky_as_set(left='쌆\x14\x0e', right='\x0f\x873\U000a4252\x15É\x8eÙ') produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_compare_with_tversky_as_set(
E               left='쌆\x14\x0e',
E               right='\x0f\x873\U000a4252\x15É\x8eÙ',
E           )
E           Unreliable test timings! On an initial run, this test took 502.41ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.37 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_is_normalized[alg6] ___________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = 'ë^\x89J\x83'
alg = EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', 'ë^\x89J\x83', EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}))
kwargs = {}, initial_draws = 2
runtime = datetime.timedelta(microseconds=351460)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 351.46ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(...) from exception
E           hypothesis.errors.Flaky: Hypothesis test_is_normalized(left='', right='ë^\x89J\x83', alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_is_normalized(
E               alg=EntropyNCD({'qval': 1, 'coef': 1, 'base': 2}),
E               left='',
E               right='ë^\x89J\x83',
E           )
E           Unreliable test timings! On an initial run, this test took 351.46ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.79 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_normalized_by_one[alg0] _________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = ''
alg = ArithNCD({'base': 2, 'terminator': None, 'qval': 1})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '', ArithNCD({'base': 2, 'terminator': None, 'qval': 1}))
kwargs = {}, initial_draws = 2
runtime = datetime.timedelta(microseconds=420242)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 420.24ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(...) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='', alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalized_by_one(
E               alg=ArithNCD({'base': 2, 'terminator': None, 'qval': 1}),
E               left='',
E               right='',
E           )
E           Unreliable test timings! On an initial run, this test took 420.24ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 4.44 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_________________________ test_normalized_by_one[alg1] _________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '\x86~\U000c48bdÂ\U0005ecfd\U000f2779'
alg = BWTRLENCD({'terminator': '\x00'})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '\x86~\U000c48bdÂ\U0005ecfd\U000f2779', BWTRLENCD({'terminator': '\x00'}))
kwargs = {}, initial_draws = 2
runtime = datetime.timedelta(microseconds=487424)
current_deadline = datetime.timedelta(microseconds=250000)

>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 487.42ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

>           raise Flaky(...) from exception
E           hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='\x86~\U000c48bdÂ\U0005ecfd\U000f2779', alg=BWTRLENCD({'terminator': '\x00'})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: test_normalized_by_one(
E               alg=BWTRLENCD({'terminator': '\x00'}),
E               left='',
E               right='\x86~\U000c48bdÂ\U0005ecfd\U000f2779',
E           )
E           Unreliable test timings! On an initial run, this test took 487.42ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.82 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________________ test_normalization_range ___________________________
[gw1] linux -- Python 3.12.0 /usr/bin/python3

text = '\x1d'

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_normalization_range(text):

tests/test_compression/test_entropy_ncd.py:62:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('\x1d',), kwargs = {}, initial_draws = 1, start = 11038913.357482074
result = None, finish = 11038913.617116977, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=259635)
current_deadline = datetime.timedelta(microseconds=250000)

        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 259.63ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

    @hypothesis.given(text=hypothesis.strategies.text(min_size=1))
>   def test_normalization_range(text):

tests/test_compression/test_entropy_ncd.py:62:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 4 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 259.63ms, which exceeds the deadline of 200.00ms'), "args = ('\\x1d',), kwargs = {}, init...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis test_normalization_range(text='\x1d') produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: test_normalization_range( E text='\x1d', E ) E Unreliable test timings! On an initial run, this test took 259.63ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.70 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. 
_________________________ test_normalized_by_one[alg2] _________________________
[gw2] linux -- Python 3.12.0 /usr/bin/python3

left = '', right = '', alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = ('', '', BZ2NCD({})), kwargs = {}, initial_draws = 2
start = 11038922.173319515, result = None, finish = 11038922.465271516
internal_draw_time = 0, runtime = datetime.timedelta(microseconds=291952)
current_deadline = datetime.timedelta(microseconds=250000)

        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 291.95ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

alg = BZ2NCD({})

    @pytest.mark.parametrize('alg', ALGS)
>   @hypothesis.given(
        left=hypothesis.strategies.text(),
        right=hypothesis.strategies.text(),
    )

tests/test_compression/test_common.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 3 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 291.95ms, which exceeds the deadline of 200.00ms'), "args = ('', '', BZ2NCD({})), kwargs ...hich exceeds the deadline of 200.00ms\n\n/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded\n")
example_kwargs = None
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis test_normalized_by_one(left='', right='', alg=BZ2NCD({})) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: test_normalized_by_one(
E           alg=BZ2NCD({}),
E           left='',
E           right='',
E       )
E       Unreliable test timings! On an initial run, this test took 291.95ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 12.89 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
=========================== short test summary info ============================
FAILED tests/test_common.py::test_normalization_same[alg12] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg0] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg1] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg0] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_same[alg1] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_range[alg12] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg12] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg0] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg1] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg2] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg2] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg13] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg14] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg13] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg3] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg15] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg16] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg5] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg15] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg5] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg16] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg17] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg18] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg6] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg17] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg16] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg6] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg17] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_same[alg7] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_by_one[alg7] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_same[alg8] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_range[alg7] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg19] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg9] - hypothesis.error...
FAILED tests/test_common.py::test_normalization_same[alg10] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg8] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg8] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_range[alg19] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg9] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_same[alg21] - hypothesis.erro...
FAILED tests/test_common.py::test_normalization_by_one[alg19] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg9] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg10] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_range[alg21] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg10] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg21] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_range[alg22] - hypothesis.err...
FAILED tests/test_external.py::test_qval[2-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_common.py::test_normalization_range[alg11] - hypothesis.err...
FAILED tests/test_common.py::test_normalization_by_one[alg22] - hypothesis.er...
FAILED tests/test_common.py::test_normalization_by_one[alg11] - hypothesis.er...
FAILED tests/test_compression/test_common.py::test_simmetry[alg6] - hypothesi...
FAILED tests/test_compression/test_common.py::test_simmetry[alg0] - hypothesi...
FAILED tests/test_common.py::test_normalization_by_one[alg23] - hypothesis.er...
FAILED tests/test_external.py::test_qval[None-Jaro] - hypothesis.errors.Flaky...
FAILED tests/test_external.py::test_qval[None-JaroWinkler] - hypothesis.error...
FAILED tests/test_external.py::test_qval[1-Jaro] - hypothesis.errors.FailedHe...
FAILED tests/test_external.py::test_qval[3-Jaro] - hypothesis.errors.Flaky: H...
FAILED tests/test_external.py::test_qval[3-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_common.py::test_normalization_range[alg23] - hypothesis.err...
FAILED tests/test_compression/test_common.py::test_simmetry[alg1] - hypothesi...
FAILED tests/test_compression/test_sqrt_ncd.py::test_simmetry_compressor - hy...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg0] - hypo...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg4] - ...
FAILED tests/test_compression/test_sqrt_ncd.py::test_idempotency_compressor
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg5] - ...
FAILED tests/test_external.py::test_qval[1-JaroWinkler] - hypothesis.errors.F...
FAILED tests/test_compression/test_sqrt_ncd.py::test_monotonicity_compressor
FAILED tests/test_compression/test_common.py::test_simmetry[alg2] - hypothesi...
FAILED tests/test_compression/test_common.py::test_simmetry[alg3] - hypothesi...
FAILED tests/test_external.py::test_qval[2-Jaro] - hypothesis.errors.Flaky: H...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg1] - hypo...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg6] - ...
FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky - hypothes...
FAILED tests/test_external.py::test_list_of_numbers[JaroWinkler] - hypothesis...
FAILED tests/test_compression/test_common.py::test_simmetry[alg4] - hypothesi...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg2] - hypo...
FAILED tests/test_compression/test_common.py::test_simmetry[alg5] - hypothesi...
FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky_as_set - ...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg3] - hypo...
FAILED tests/test_compression/test_entropy_ncd.py::test_idempotency_compressor
FAILED tests/test_compression/test_common.py::test_is_normalized[alg4] - hypo...
FAILED tests/test_compression/test_sqrt_ncd.py::test_normalization_range - hy...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg5] - hypo...
FAILED tests/test_token/test_sorensen.py::test_compare_with_tversky - hypothe...
FAILED tests/test_token/test_jaccard.py::test_compare_with_tversky_as_set - h...
FAILED tests/test_compression/test_common.py::test_is_normalized[alg6] - hypo...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg0] - ...
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg1] - ...
FAILED tests/test_compression/test_entropy_ncd.py::test_normalization_range
FAILED tests/test_compression/test_common.py::test_normalized_by_one[alg2] - ...
================= 91 failed, 321 passed in 1259.47s (0:20:59) ==================

RPM build errors:
error: Bad exit status from /var/tmp/rpm-tmp.x4MtTE (%check)
    Bad exit status from /var/tmp/rpm-tmp.x4MtTE (%check)
Child return code was: 1
EXCEPTION: [Error('Command failed: \n # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec\n', 1)]
Traceback (most recent call last):
  File "/usr/lib/python3.11/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
             ^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mockbuild/util.py", line 597, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed:
 # bash --login -c /usr/bin/rpmbuild -ba --noprep --noclean --target noarch --nodeps /builddir/build/SPECS/python-textdistance.spec
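The failures that sank %check are Hypothesis Flaky, DeadlineExceeded, and health-check errors rather than assertion failures in textdistance itself, so one packaging-side option is to register a deadline-free settings profile and select it when the suite runs. A sketch of one conventional approach (the "packaging" profile name and the environment-variable handling are illustrative; register_profile and load_profile are standard Hypothesis settings APIs):

    # conftest.py sketch for running this suite on slow builders: register a
    # deadline-free profile and let the environment choose it, so a spec's
    # %check could run the tests with HYPOTHESIS_PROFILE=packaging set.
    import os

    from hypothesis import settings

    settings.register_profile("packaging", deadline=None, derandomize=True)
    settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))

derandomize=True additionally makes example generation deterministic, which suits reproducible package builds where a randomly chosen failing example would make the build itself flaky.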