Mock Version: 5.0
Mock Version: 5.0
Mock Version: 5.0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec'], chrootPath='/var/lib/mock/f40-build-2351712-60188/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1706227200
Wrote: /builddir/build/SRPMS/python-mutagen-1.47.0-3.fc40.src.rpm
Child return code was: 0
ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bb --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec'], chrootPath='/var/lib/mock/f40-build-2351712-60188/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False)
Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bb --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False
Building target platforms: noarch
Building for target noarch
setting SOURCE_DATE_EPOCH=1706227200
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.U3llXS
+ umask 022
+ cd /builddir/build/BUILD
+ cd /builddir/build/BUILD
+ rm -rf mutagen-1.47.0
+ /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/mutagen-1.47.0.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd mutagen-1.47.0
+ rm -rf /builddir/build/BUILD/mutagen-1.47.0-SPECPARTS
+ /usr/bin/mkdir -p /builddir/build/BUILD/mutagen-1.47.0-SPECPARTS
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ RPM_EC=0 ++ jobs -p + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.puZmzR + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd mutagen-1.47.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + /usr/bin/python3 setup.py build '--executable=/usr/bin/python3 -sP' running build running build_py creating build creating build/lib creating build/lib/mutagen copying mutagen/easymp4.py -> build/lib/mutagen copying mutagen/monkeysaudio.py -> build/lib/mutagen copying mutagen/m4a.py -> build/lib/mutagen copying mutagen/oggopus.py -> build/lib/mutagen copying mutagen/wave.py -> build/lib/mutagen copying mutagen/aiff.py -> build/lib/mutagen copying mutagen/easyid3.py -> build/lib/mutagen copying mutagen/_constants.py -> build/lib/mutagen copying mutagen/trueaudio.py -> build/lib/mutagen copying mutagen/_vorbis.py -> build/lib/mutagen copying mutagen/oggvorbis.py -> build/lib/mutagen copying mutagen/_iff.py -> build/lib/mutagen copying mutagen/tak.py -> build/lib/mutagen copying mutagen/flac.py -> build/lib/mutagen copying mutagen/_riff.py -> build/lib/mutagen copying mutagen/dsf.py -> 
build/lib/mutagen copying mutagen/wavpack.py -> build/lib/mutagen copying mutagen/optimfrog.py -> build/lib/mutagen copying mutagen/oggspeex.py -> build/lib/mutagen copying mutagen/ogg.py -> build/lib/mutagen copying mutagen/apev2.py -> build/lib/mutagen copying mutagen/aac.py -> build/lib/mutagen copying mutagen/dsdiff.py -> build/lib/mutagen copying mutagen/smf.py -> build/lib/mutagen copying mutagen/__init__.py -> build/lib/mutagen copying mutagen/musepack.py -> build/lib/mutagen copying mutagen/oggflac.py -> build/lib/mutagen copying mutagen/ac3.py -> build/lib/mutagen copying mutagen/_tags.py -> build/lib/mutagen copying mutagen/oggtheora.py -> build/lib/mutagen copying mutagen/_util.py -> build/lib/mutagen copying mutagen/_file.py -> build/lib/mutagen creating build/lib/mutagen/id3 copying mutagen/id3/_id3v1.py -> build/lib/mutagen/id3 copying mutagen/id3/_frames.py -> build/lib/mutagen/id3 copying mutagen/id3/__init__.py -> build/lib/mutagen/id3 copying mutagen/id3/_specs.py -> build/lib/mutagen/id3 copying mutagen/id3/_tags.py -> build/lib/mutagen/id3 copying mutagen/id3/_util.py -> build/lib/mutagen/id3 copying mutagen/id3/_file.py -> build/lib/mutagen/id3 creating build/lib/mutagen/mp4 copying mutagen/mp4/__init__.py -> build/lib/mutagen/mp4 copying mutagen/mp4/_atom.py -> build/lib/mutagen/mp4 copying mutagen/mp4/_as_entry.py -> build/lib/mutagen/mp4 copying mutagen/mp4/_util.py -> build/lib/mutagen/mp4 creating build/lib/mutagen/asf copying mutagen/asf/_attrs.py -> build/lib/mutagen/asf copying mutagen/asf/_objects.py -> build/lib/mutagen/asf copying mutagen/asf/__init__.py -> build/lib/mutagen/asf copying mutagen/asf/_util.py -> build/lib/mutagen/asf creating build/lib/mutagen/mp3 copying mutagen/mp3/__init__.py -> build/lib/mutagen/mp3 copying mutagen/mp3/_util.py -> build/lib/mutagen/mp3 creating build/lib/mutagen/_tools copying mutagen/_tools/moggsplit.py -> build/lib/mutagen/_tools copying mutagen/_tools/mid3cp.py -> build/lib/mutagen/_tools copying mutagen/_tools/mid3iconv.py -> build/lib/mutagen/_tools copying mutagen/_tools/mid3v2.py -> build/lib/mutagen/_tools copying mutagen/_tools/mutagen_pony.py -> build/lib/mutagen/_tools copying mutagen/_tools/__init__.py -> build/lib/mutagen/_tools copying mutagen/_tools/mutagen_inspect.py -> build/lib/mutagen/_tools copying mutagen/_tools/_util.py -> build/lib/mutagen/_tools copying mutagen/py.typed -> build/lib/mutagen + sphinx-build -b html -n docs docs/_build Running Sphinx v7.2.6 making output directory... done loading intersphinx inventory from https://docs.python.org/3/objects.inv... WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://docs.python.org/3/objects.inv' not fetchable due to : HTTPSConnectionPool(host='docs.python.org', port=443): Max retries exceeded with url: /3/objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) building [mo]: targets for 0 po files that are out of date writing output... building [html]: targets for 48 source files that are out of date updating environment: [new config] 48 added, 0 changed, 0 removed reading sources... [ 2%] api/aac reading sources... [ 4%] api/ac3 reading sources... [ 6%] api/aiff reading sources... [ 8%] api/ape reading sources... [ 10%] api/asf reading sources... [ 12%] api/base reading sources... [ 15%] api/dsdiff reading sources... [ 17%] api/dsf reading sources... [ 19%] api/flac reading sources... [ 21%] api/id3 reading sources... 
[ 23%] api/id3_frames reading sources... [ 25%] api/index reading sources... [ 27%] api/monkeysaudio reading sources... [ 29%] api/mp3 reading sources... [ 31%] api/mp4 reading sources... [ 33%] api/musepack reading sources... [ 35%] api/ogg reading sources... [ 38%] api/oggflac reading sources... [ 40%] api/oggopus reading sources... [ 42%] api/oggspeex reading sources... [ 44%] api/oggtheora reading sources... [ 46%] api/oggvorbis reading sources... [ 48%] api/optimfrog reading sources... [ 50%] api/smf reading sources... [ 52%] api/tak reading sources... [ 54%] api/trueaudio reading sources... [ 56%] api/vcomment reading sources... [ 58%] api/wave reading sources... [ 60%] api/wavpack reading sources... [ 62%] changelog reading sources... [ 65%] contact reading sources... [ 67%] index reading sources... [ 69%] man/index reading sources... [ 71%] man/mid3cp reading sources... [ 73%] man/mid3iconv reading sources... [ 75%] man/mid3v2 reading sources... [ 77%] man/moggsplit reading sources... [ 79%] man/mutagen-inspect reading sources... [ 81%] man/mutagen-pony reading sources... [ 83%] user/apev2 reading sources... [ 85%] user/classes reading sources... [ 88%] user/filelike reading sources... [ 90%] user/gettingstarted reading sources... [ 92%] user/id3 reading sources... [ 94%] user/index reading sources... [ 96%] user/mp4 reading sources... [ 98%] user/padding reading sources... [100%] user/vcomment looking for now-outdated files... none found pickling environment... done checking consistency... done preparing documents... done copying assets... copying static files... done copying extra files... done done writing output... [ 2%] api/aac writing output... [ 4%] api/ac3 writing output... [ 6%] api/aiff writing output... [ 8%] api/ape writing output... [ 10%] api/asf writing output... [ 12%] api/base writing output... [ 15%] api/dsdiff writing output... [ 17%] api/dsf writing output... [ 19%] api/flac writing output... [ 21%] api/id3 writing output... [ 23%] api/id3_frames writing output... [ 25%] api/index writing output... [ 27%] api/monkeysaudio writing output... [ 29%] api/mp3 writing output... [ 31%] api/mp4 writing output... [ 33%] api/musepack writing output... [ 35%] api/ogg writing output... [ 38%] api/oggflac writing output... [ 40%] api/oggopus writing output... [ 42%] api/oggspeex writing output... [ 44%] api/oggtheora writing output... [ 46%] api/oggvorbis writing output... [ 48%] api/optimfrog writing output... [ 50%] api/smf writing output... [ 52%] api/tak writing output... [ 54%] api/trueaudio writing output... [ 56%] api/vcomment writing output... [ 58%] api/wave writing output... [ 60%] api/wavpack writing output... [ 62%] changelog writing output... [ 65%] contact writing output... [ 67%] index writing output... [ 69%] man/index writing output... [ 71%] man/mid3cp writing output... [ 73%] man/mid3iconv writing output... [ 75%] man/mid3v2 writing output... [ 77%] man/moggsplit writing output... [ 79%] man/mutagen-inspect writing output... [ 81%] man/mutagen-pony writing output... [ 83%] user/apev2 writing output... [ 85%] user/classes writing output... [ 88%] user/filelike writing output... [ 90%] user/gettingstarted writing output... [ 92%] user/id3 writing output... [ 94%] user/index writing output... [ 96%] user/mp4 writing output... [ 98%] user/padding writing output... 
[100%] user/vcomment
/builddir/build/BUILD/mutagen-1.47.0/docs/api/base.rst:54: WARNING: py:obj reference target not found: python:str
/builddir/build/BUILD/mutagen-1.47.0/docs/api/base.rst:60: WARNING: py:obj reference target not found: python:bytes
/builddir/build/BUILD/mutagen-1.47.0/docs/api/base.rst:66: WARNING: py:obj reference target not found: python:str
/builddir/build/BUILD/mutagen-1.47.0/docs/api/base.rst:66: WARNING: py:obj reference target not found: python:bytes
/builddir/build/BUILD/mutagen-1.47.0/mutagen/mp4/__init__.py:docstring of mutagen.mp4.MP4Chapters:1: WARNING: py:class reference target not found: collections.abc.Sequence
/builddir/build/BUILD/mutagen-1.47.0/docs/user/filelike.rst:7: WARNING: py:obj reference target not found: io.BytesIO
generating indices... genindex py-modindex done
writing additional pages... search done
copying images... [100%] images/logo.svg
dumping search index in English (code: en)... done
dumping object inventory... done
build succeeded, 7 warnings.
The HTML pages are in docs/_build.
+ RPM_EC=0
++ jobs -p
+ exit 0
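
Note that the intersphinx failure earlier in the %build phase comes from the network-isolated chroot (mock was invoked with unshare_net=True), so the fetch of https://docs.python.org/3/objects.inv can only fail, and the "reference target not found" warnings for python:str, python:bytes, collections.abc.Sequence and io.BytesIO above follow directly from the missing inventory. A minimal sketch of one mitigation, assuming the project's docs/conf.py enables sphinx.ext.intersphinx and that an OFFLINE_DOCS_BUILD environment variable (hypothetical, not part of this build) is exported by the spec file:

    # docs/conf.py (sketch, not mutagen's actual configuration)
    import os

    extensions = ["sphinx.ext.intersphinx"]

    if os.environ.get("OFFLINE_DOCS_BUILD"):
        # No network inside the chroot: skip remote inventories so the build
        # stays deterministic; cross-references degrade to unresolved text.
        intersphinx_mapping = {}
    else:
        intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}

Intersphinx also accepts a local inventory file as the second tuple element, so shipping a pre-fetched objects.inv with the sources is another way to keep the references resolvable offline.
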
Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.SECPRj
+ umask 022
+ cd /builddir/build/BUILD
+ '[' /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch '!=' / ']'
+ rm -rf /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch
++ dirname /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch
+ mkdir -p /builddir/build/BUILDROOT
+ mkdir /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ cd mutagen-1.47.0
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ /usr/bin/python3 setup.py install -O1 --skip-build --root /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch --prefix /usr
running install
/usr/lib/python3.12/site-packages/setuptools/_distutils/cmd.py:66: SetuptoolsDeprecationWarning: setup.py install is deprecated.
!!

        ********************************************************************************
        Please avoid running ``setup.py`` directly.
        Instead, use pypa/build, pypa/installer or other standards-based tools.
        Follow the current Python packaging guidelines when building Python RPM packages.
        See https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html
        and https://docs.fedoraproject.org/en-US/packaging-guidelines/Python/ for details.
        ********************************************************************************

!!
  self.initialize_options()
running install_lib
creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr
creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib
creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12
creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages
creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen
copying build/lib/mutagen/easymp4.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen
copying build/lib/mutagen/monkeysaudio.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen
copying build/lib/mutagen/m4a.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen
copying build/lib/mutagen/oggopus.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen
creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools
copying build/lib/mutagen/_tools/moggsplit.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools
copying build/lib/mutagen/_tools/mid3cp.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools
copying build/lib/mutagen/_tools/mid3iconv.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools
copying build/lib/mutagen/_tools/mid3v2.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools
copying build/lib/mutagen/_tools/mutagen_pony.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools
copying build/lib/mutagen/_tools/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools
copying build/lib/mutagen/_tools/mutagen_inspect.py ->
/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/_tools/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/wave.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/aiff.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/easyid3.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_constants.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/trueaudio.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_vorbis.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/oggvorbis.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3 copying build/lib/mutagen/mp3/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3 copying build/lib/mutagen/mp3/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3 copying build/lib/mutagen/_iff.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/tak.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/flac.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_riff.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/dsf.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 copying build/lib/mutagen/mp4/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 copying build/lib/mutagen/mp4/_atom.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 copying build/lib/mutagen/mp4/_as_entry.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 copying build/lib/mutagen/mp4/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_id3v1.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_frames.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying 
build/lib/mutagen/id3/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_specs.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_tags.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_file.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/wavpack.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/optimfrog.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/py.typed -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/oggspeex.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/ogg.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/apev2.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/aac.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/dsdiff.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/smf.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/musepack.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/oggflac.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/ac3.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_tags.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/asf/_attrs.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/asf/_objects.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/asf/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/asf/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/oggtheora.py -> 
/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_file.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/easymp4.py to easymp4.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/monkeysaudio.py to monkeysaudio.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/m4a.py to m4a.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggopus.py to oggopus.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/moggsplit.py to moggsplit.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mid3cp.py to mid3cp.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mid3iconv.py to mid3iconv.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mid3v2.py to mid3v2.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mutagen_pony.py to mutagen_pony.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mutagen_inspect.py to mutagen_inspect.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/wave.py to wave.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/aiff.py to aiff.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/easyid3.py to easyid3.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_constants.py to _constants.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/trueaudio.py to trueaudio.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_vorbis.py to _vorbis.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggvorbis.py to oggvorbis.cpython-312.pyc byte-compiling 
/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_iff.py to _iff.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/tak.py to tak.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/flac.py to flac.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_riff.py to _riff.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/dsf.py to dsf.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4/_atom.py to _atom.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4/_as_entry.py to _as_entry.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_id3v1.py to _id3v1.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_frames.py to _frames.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_specs.py to _specs.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_tags.py to _tags.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_file.py to _file.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/wavpack.py to wavpack.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/optimfrog.py to optimfrog.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggspeex.py to oggspeex.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/ogg.py to ogg.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/apev2.py to 
apev2.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/aac.py to aac.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/dsdiff.py to dsdiff.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/smf.py to smf.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/musepack.py to musepack.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggflac.py to oggflac.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/ac3.py to ac3.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tags.py to _tags.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf/_attrs.py to _attrs.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf/_objects.py to _objects.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggtheora.py to oggtheora.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_file.py to _file.cpython-312.pyc writing byte-compilation script '/tmp/tmp7g65uolg.py' /usr/bin/python3 /tmp/tmp7g65uolg.py removing /tmp/tmp7g65uolg.py running install_data creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mid3cp.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mid3iconv.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mutagen-inspect.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mutagen-pony.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/moggsplit.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mid3v2.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 running install_egg_info running egg_info writing mutagen.egg-info/PKG-INFO writing dependency_links to mutagen.egg-info/dependency_links.txt writing entry 
points to mutagen.egg-info/entry_points.txt
writing top-level names to mutagen.egg-info/top_level.txt
reading manifest file 'mutagen.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
warning: no files found matching '*.pyi' under directory 'mutagen'
warning: no files found matching 'README.rst' under directory 'mutagen'
adding license file 'COPYING'
writing manifest file 'mutagen.egg-info/SOURCES.txt'
Copying mutagen.egg-info to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen-1.47.0-py3.12.egg-info
running install_scripts
Installing mid3cp script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin
Installing mid3iconv script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin
Installing mid3v2 script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin
Installing moggsplit script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin
Installing mutagen-inspect script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin
Installing mutagen-pony script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin
+ rm -rfv /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin/__pycache__
+ install -D -p -m 0644 man/mid3cp.1 man/mid3iconv.1 man/mid3v2.1 man/moggsplit.1 man/mutagen-inspect.1 man/mutagen-pony.1 /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1
+ rm -rf docs/_build/.buildinfo docs/_build/.doctrees
+ /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 1.47.0-3.fc40 --unique-debug-suffix -1.47.0-3.fc40.noarch --unique-debug-src-base python-mutagen-1.47.0-3.fc40.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/mutagen-1.47.0
find-debuginfo: starting
Extracting debug info from 0 files
Creating .debug symlinks for symlinks to ELF files
find: ‘debug’: No such file or directory
find-debuginfo: done
+ /usr/lib/rpm/check-buildroot
+ /usr/lib/rpm/redhat/brp-ldconfig
+ /usr/lib/rpm/brp-compress
+ /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip
+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip
+ /usr/lib/rpm/check-rpaths
+ /usr/lib/rpm/redhat/brp-mangle-shebangs
+ /usr/lib/rpm/brp-remove-la-files
+ env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8
Bytecompiling .py files below /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12 using python3.12
+ /usr/lib/rpm/redhat/brp-python-hardlink
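
The SetuptoolsDeprecationWarning emitted during %install names pypa/build and pypa/installer as the replacements for `setup.py install`; the Fedora guidelines linked in the warning recommend the %pyproject_wheel / %pyproject_install macros, which automate an equivalent standards-based flow. A minimal sketch of the programmatic version, assuming a checkout with a pyproject.toml or setup.py in the current directory (paths are illustrative):

    # Sketch of the flow the warning suggests; roughly `python -m build --wheel`.
    from build import ProjectBuilder

    builder = ProjectBuilder(".")                # source tree with the metadata files
    wheel_path = builder.build("wheel", "dist")  # writes dist/<name>-<ver>-*.whl
    print("built wheel:", wheel_path)

The resulting wheel would then be unpacked into the buildroot with something like `python3 -m installer --destdir="$RPM_BUILD_ROOT" dist/*.whl` rather than letting distutils copy files directly.
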
Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.dT0Exv
+ umask 022
+ cd /builddir/build/BUILD
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CFLAGS
+ CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ export CXXFLAGS
+ FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FFLAGS
+ FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules '
+ export FCFLAGS
+ VALAFLAGS=-g
+ export VALAFLAGS
+ RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn'
+ export RUSTFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ export LDFLAGS
+ LT_SYS_LIBRARY_PATH=/usr/lib:
+ export LT_SYS_LIBRARY_PATH
+ CC=gcc
+ export CC
+ CXX=g++
+ export CXX
+ cd mutagen-1.47.0
+ CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer '
+ LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 '
+ PATH=/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin
+ PYTHONPATH=/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib64/python3.12/site-packages:/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages
+ PYTHONDONTWRITEBYTECODE=1
+ PYTEST_XDIST_AUTO_NUM_WORKERS=8
+ /usr/bin/pytest
============================= test session starts ==============================
platform linux -- Python 3.12.0, pytest-7.3.2, pluggy-1.3.0
rootdir: /builddir/build/BUILD/mutagen-1.47.0
plugins: hypothesis-6.82.0
collected 4033 items

tests/test___init__.py .....................................F........... [  1%]
...........F......................FF.....................FF............. [  3%]
.......FFF....................FF......................F................. [  4%]
.....F......................F......................FF................... [  6%]
.FFF.....................F.....................FFF....................FF [  8%]
F....................FFF.....................F......................F... [ 10%]
..................FFF....................FF.....................FFF..... [ 11%]
...............FFF....................FFF....................FFF........ [ 13%]
............FFF.....................F.....................FFF........... [ 15%]
..........F.....................FFF....................FF............... [ 17%]
......FFF.....................F.....................FFF................. [ 19%]
....FF.....................FF....................FFF.................... [ 20%]
FFF....................FFF.....................F......................F. [ 22%]
....................FF......................F.....................FFF... [ 24%]
..................F.....................FFF....................FFF......
[ 26%] ..............FFF....................FFF....................FFF......... [ 27%] ............F......................F.....................FFF............ [ 29%] .........F.....................FFF.....................F................ [ 31%] .....FFF.....................F.....................FFF.................. [ 33%] ..FFF........F...... [ 33%] tests/test__id3frames.py ............................................... [ 35%] ........................................................................ [ 36%] ........................................................................ [ 38%] ........................................................................ [ 40%] ........................................................................ [ 42%] ........................................................................ [ 43%] ........................................................................ [ 45%] ........................................................................ [ 47%] ........................................................................ [ 49%] ........................................................................ [ 51%] ........................................................................ [ 52%] ........................................................................ [ 54%] ........................................................................ [ 56%] ........................................................................ [ 58%] ...................................................... [ 59%] tests/test__id3specs.py .......................................... [ 60%] tests/test__id3util.py ......................... [ 61%] tests/test__iff.py . [ 61%] tests/test__riff.py ......... [ 61%] tests/test__util.py .................................................... [ 62%] ............................................. [ 63%] tests/test__vorbis.py .................................................. [ 65%] ........ [ 65%] tests/test_aac.py ............ [ 65%] tests/test_ac3.py ........ [ 65%] tests/test_aiff.py .................................. [ 66%] tests/test_apev2.py .................................................... [ 67%] .................................... [ 68%] tests/test_asf.py ...................................................... [ 70%] ....................................................................... [ 71%] tests/test_dsdiff.py ........... [ 72%] tests/test_dsf.py .................. [ 72%] tests/test_easyid3.py ............................................... [ 73%] tests/test_easymp4.py .............. [ 74%] tests/test_flac.py ..................................................... [ 75%] ........................................................................ [ 77%] ............................... [ 78%] tests/test_id3.py ...................................................... [ 79%] ........................................................................ [ 81%] ... [ 81%] tests/test_m4a.py . [ 81%] tests/test_monkeysaudio.py ........ [ 81%] tests/test_mp3.py ................................................ [ 82%] tests/test_mp4.py ...................................................... [ 83%] ........................................................................ [ 85%] ........................................................................ [ 87%] ........................................................................ [ 89%] ........................................................................ [ 91%] . [ 91%] tests/test_musepack.py ............ 
[ 91%]
tests/test_ogg.py ................................................. [ 92%]
tests/test_oggflac.py ................................... [ 93%]
tests/test_oggopus.py ............................. [ 94%]
tests/test_oggspeex.py ................................ [ 95%]
tests/test_oggtheora.py .............................. [ 95%]
tests/test_oggvorbis.py ........................................... [ 96%]
tests/test_optimfrog.py ....... [ 97%]
tests/test_smf.py .... [ 97%]
tests/test_tak.py ......... [ 97%]
tests/test_tools_mid3cp.py .............. [ 97%]
tests/test_tools_mid3iconv.py ....... [ 97%]
tests/test_tools_mid3v2.py ................................ [ 98%]
tests/test_tools_moggsplit.py . [ 98%]
tests/test_tools_mutagen_inspect.py . [ 98%]
tests/test_tools_mutagen_pony.py . [ 98%]
tests/test_tools_util.py .. [ 98%]
tests/test_trueaudio.py .......... [ 99%]
tests/test_wave.py ................. [ 99%]
tests/test_wavpack.py ...................... [100%]

=================================== FAILURES ===================================
_____________________ TFileTypeAAC.test_test_fileobj_load ______________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.24 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(106876305394833800868814445526371485158) to this test or run pytest with --hypothesis-seed=106876305394833800868814445526371485158 to reproduce this failure.
____________________ TFileTypeAAC_2.test_test_fileobj_load _____________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.08 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(41029730041742610141737571547399379415) to this test or run pytest with --hypothesis-seed=41029730041742610141737571547399379415 to reproduce this failure.
_____________________ TFileTypeAC3.test_test_fileobj_load ______________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(33244582297365834896803734533976809604) to this test or run pytest with --hypothesis-seed=33244582297365834896803734533976809604 to reproduce this failure.
_____________________ TFileTypeAC3.test_test_fileobj_save ______________________

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(4965309121152432650778423769061191948) to this test or run pytest with --hypothesis-seed=4965309121152432650778423769061191948 to reproduce this failure.
____________________ TFileTypeAC3_2.test_test_fileobj_load _____________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.00 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(216051988275489442069630732162256642695) to this test or run pytest with --hypothesis-seed=216051988275489442069630732162256642695 to reproduce this failure.
____________________ TFileTypeAC3_2.test_test_fileobj_save _____________________

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.04 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(268906881781539231879523329604298144691) to this test or run pytest with --hypothesis-seed=268906881781539231879523329604298144691 to reproduce this failure.
____________________ TFileTypeAIFF.test_test_fileobj_delete ____________________

self =

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.28 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(231564879060946712657923402178550955482) to this test or run pytest with --hypothesis-seed=231564879060946712657923402178550955482 to reproduce this failure.
_____________________ TFileTypeAIFF.test_test_fileobj_load _____________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 2.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(98515390577277592333745158406743043452) to this test or run pytest with --hypothesis-seed=98515390577277592333745158406743043452 to reproduce this failure.
_____________________ TFileTypeAIFF.test_test_fileobj_save _____________________

self =
data = ConjectureData(INTERESTING, 2 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent an
        ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or it
        might have placed ``data`` in an unsuccessful state and then swallowed
        the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find

        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)

                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")

                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 2 bytes, frozen)
function = .run at 0xffffffb1029b20>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 2 bytes, frozen)

    def run(data):
        # Set up dynamic context needed by a single test run.
        with local_settings(self.settings):
            with deterministic_PRNG():
                with BuildContext(data, is_final=is_final) as context:
                    if self.stuff.selfy is not None:
                        data.hypothesis_runner = self.stuff.selfy
                    # Generate all arguments to the test function.
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): tests/test___init__.py:358: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 6720708.244865235, result = None finish = 6720708.534205916, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=289341) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 289.34ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:358: in run def run(t): _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 289.34ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 289.34ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, fail_after=i))(0), E ) E Unreliable test timings! 
On an initial run, this test took 289.34ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.11 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ___________________ TFileTypeAIFF_2.test_test_fileobj_delete ___________________ self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) function = .run at 0xffffffb0f9d080> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) tests/test___init__.py:371: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 6720732.045514803, result = None finish = 6720732.378694781, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=333180) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 333.18ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:371: in run h, lambda t: o.delete(fileobj=t))) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 333.18ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 333.18ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, fail_after=i))(1), E ) E Unreliable test timings! 
On an initial run, this test took 333.18ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.49 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ____________________ TFileTypeAIFF_2.test_test_fileobj_load ____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.14 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(198472419986667294611920366864468032666) to this test or run pytest with --hypothesis-seed=198472419986667294611920366864468032666 to reproduce this failure. ____________________ TFileTypeAIFF_3.test_test_fileobj_load ____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.24 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(80145764714110363572448467623560625453) to this test or run pytest with --hypothesis-seed=80145764714110363572448467623560625453 to reproduce this failure. ____________________ TFileTypeAIFF_4.test_test_fileobj_load ____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.21 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). 
E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(304961796734979436735700642104519224812) to this test or run pytest with --hypothesis-seed=304961796734979436735700642104519224812 to reproduce this failure. ____________________ TFileTypeAIFF_5.test_test_fileobj_load ____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.04 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(254967829271332564921368802699610663207) to this test or run pytest with --hypothesis-seed=254967829271332564921368802699610663207 to reproduce this failure. ____________________ TFileTypeAIFF_6.test_test_fileobj_load ____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.38 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(124477844268567424079661088563155428918) to this test or run pytest with --hypothesis-seed=124477844268567424079661088563155428918 to reproduce this failure. 
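[editor's note] The FailedHealthCheck messages above all point at the same knob: Hypothesis's too_slow health check can be suppressed per test through the settings decorator, exactly as the log suggests. A minimal sketch, assuming a simplified stand-in test (the strategy, test name, and body here are illustrative placeholders, not mutagen's actual generate_test_file_objects code):

    from hypothesis import HealthCheck, given, settings, strategies as st

    # Suppress only the too_slow health check, as the message above suggests;
    # every other health check stays active. Shrinking the generated data
    # (max_size) would address the root cause instead of silencing the symptom.
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(st.binary(max_size=1024))
    def test_parse_does_not_crash(data):
        assert isinstance(data, bytes)

Given how many tests fail this way here, a single global relaxation (see the settings-profile sketch at the end of this section) is usually cleaner on a slow builder than decorating dozens of tests individually.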
____________________ TFileTypeAIFF_6.test_test_fileobj_save ____________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.31 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(206842944786193179430840516622430459569) to this test or run pytest with --hypothesis-seed=206842944786193179430840516622430459569 to reproduce this failure. _________________ TFileTypeAPEv2File.test_test_fileobj_delete __________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(326951691041167596298667811462637445568) to this test or run pytest with --hypothesis-seed=326951691041167596298667811462637445568 to reproduce this failure. __________________ TFileTypeAPEv2File.test_test_fileobj_load ___________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. 
tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(181964121428007635329349540489446351564) to this test or run pytest with --hypothesis-seed=181964121428007635329349540489446351564 to reproduce this failure. __________________ TFileTypeAPEv2File.test_test_fileobj_save ___________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.00 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(239217273760239941065469888556150529631) to this test or run pytest with --hypothesis-seed=239217273760239941065469888556150529631 to reproduce this failure. _________________ TFileTypeAPEv2File_2.test_test_fileobj_load __________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.25 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(124475955485644982956096602177995694742) to this test or run pytest with --hypothesis-seed=124475955485644982956096602177995694742 to reproduce this failure. ____________________ TFileTypeASF.test_test_fileobj_delete _____________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.25 seconds (0 invalid ones and 0 exceeded maximum size). 
Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(154431902392645245312047766526439466468) to this test or run pytest with --hypothesis-seed=154431902392645245312047766526439466468 to reproduce this failure. _____________________ TFileTypeASF.test_test_fileobj_load ______________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.04 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(36275244696938866706289308113155226602) to this test or run pytest with --hypothesis-seed=36275244696938866706289308113155226602 to reproduce this failure. _____________________ TFileTypeASF.test_test_fileobj_save ______________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.67 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(437469287285370178429059470510876669) to this test or run pytest with --hypothesis-seed=437469287285370178429059470510876669 to reproduce this failure. 
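[editor's note] Each failure above also prints a seed for deterministic replay. There are two equivalent ways to use it; both are named by the log itself. Sketched with a hypothetical test body (the seed value is copied from the TFileTypeASF.test_test_fileobj_save failure directly above):

    from hypothesis import given, seed, strategies as st

    # Pin the PRNG so Hypothesis regenerates the exact failing examples.
    @seed(437469287285370178429059470510876669)
    @given(st.binary())
    def test_reproduce_asf_save(data):
        ...

Alternatively, without touching the source, the Hypothesis pytest plugin accepts the same value on the command line:

    pytest --hypothesis-seed=437469287285370178429059470510876669 tests/test___init__.py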
___________________ TFileTypeASF_2.test_test_fileobj_delete ____________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.53 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(238613720305685892539532646377873893784) to this test or run pytest with --hypothesis-seed=238613720305685892539532646377873893784 to reproduce this failure. ____________________ TFileTypeASF_2.test_test_fileobj_load _____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.11 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(263396623156029493746104860797963525827) to this test or run pytest with --hypothesis-seed=263396623156029493746104860797963525827 to reproduce this failure. ____________________ TFileTypeASF_2.test_test_fileobj_save _____________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.20 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. 
tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(296853348334762494442701712973661061376) to this test or run pytest with --hypothesis-seed=296853348334762494442701712973661061376 to reproduce this failure. ___________________ TFileTypeASF_3.test_test_fileobj_delete ____________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.24 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(302954733115516258082034302367155650588) to this test or run pytest with --hypothesis-seed=302954733115516258082034302367155650588 to reproduce this failure. ____________________ TFileTypeASF_3.test_test_fileobj_load _____________________ self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) function = .run at 0xffffffb0f2b4c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects(h, self.KIND)) > def run(t): tests/test___init__.py:345: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 6720915.757571179, result = None finish = 6720916.136723755, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=379153) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 379.15ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:345: in run def run(t): _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 379.15ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 379.15ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, fail_after=i))(5), E ) E Unreliable test timings! 
E   On an initial run, this test took 379.15ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.88 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
____________________ TFileTypeASF_3.test_test_fileobj_save _____________________

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.28 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(240105141967337201156478736463602729457) to this test or run pytest with --hypothesis-seed=240105141967337201156478736463602729457 to reproduce this failure.
____________________ TFileTypeDSDIFF.test_test_fileobj_load ____________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.50 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(164056430579247661733388537193694917894) to this test or run pytest with --hypothesis-seed=164056430579247661733388537193694917894 to reproduce this failure.
___________________ TFileTypeDSDIFF_2.test_test_fileobj_load ___________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.46 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(280419519830486797723793683358638725788) to this test or run pytest with --hypothesis-seed=280419519830486797723793683358638725788 to reproduce this failure.
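The Flaky report above ends with hypothesis's own suggested fix: raise or disable the per-example deadline. A minimal sketch of that settings change, using an illustrative strategy and test body rather than anything from mutagen's suite:

    from hypothesis import given, settings, strategies as st

    # deadline is hypothesis's per-example time budget (200ms by default,
    # as the DeadlineExceeded errors in this log show); deadline=None
    # disables it, which suits tests whose runtime varies between runs,
    # e.g. because they do real file I/O.
    @settings(deadline=None)
    @given(st.binary(max_size=64))
    def run(data):
        pass

    run()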
__________________ TFileTypeDSDIFF_3.test_test_fileobj_delete __________________

self =

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.04 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(104883980523912317622723786151099589415) to this test or run pytest with --hypothesis-seed=104883980523912317622723786151099589415 to reproduce this failure.
___________________ TFileTypeDSDIFF_3.test_test_fileobj_load ___________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.26 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(125321658756323624348286570701529373602) to this test or run pytest with --hypothesis-seed=125321658756323624348286570701529373602 to reproduce this failure.
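The FailedHealthCheck messages above spell out the narrower escape hatch: suppress only the too_slow check rather than all health checks. A minimal sketch, again with a placeholder strategy standing in for generate_test_file_objects:

    from hypothesis import HealthCheck, given, settings, strategies as st

    # Suppresses only the "data generation is too slow" health check;
    # every other health check stays active for this test.
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(st.binary(max_size=64))
    def run(data):
        pass

    run()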
___________________ TFileTypeDSDIFF_3.test_test_fileobj_save ___________________

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.21 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(284416361793498340952944913816204836187) to this test or run pytest with --hypothesis-seed=284416361793498340952944913816204836187 to reproduce this failure.
____________________ TFileTypeDSF.test_test_fileobj_delete _____________________

self =

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.14 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(135183642313678118610360155464947858516) to this test or run pytest with --hypothesis-seed=135183642313678118610360155464947858516 to reproduce this failure.
_____________________ TFileTypeDSF.test_test_fileobj_load ______________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.21 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(122375104265482987356204686112423221343) to this test or run pytest with --hypothesis-seed=122375104265482987356204686112423221343 to reproduce this failure.
___________________ TFileTypeDSF_2.test_test_fileobj_delete ____________________

self =

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.21 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(269940590948819991569480379436925725758) to this test or run pytest with --hypothesis-seed=269940590948819991569480379436925725758 to reproduce this failure.
____________________ TFileTypeDSF_2.test_test_fileobj_load _____________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.27 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(7049110937257119428384660235254501039) to this test or run pytest with --hypothesis-seed=7049110937257119428384660235254501039 to reproduce this failure.
____________________ TFileTypeDSF_2.test_test_fileobj_save _____________________

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.09 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(215772825836274515079174622160606988097) to this test or run pytest with --hypothesis-seed=215772825836274515079174622160606988097 to reproduce this failure.
___________________ TFileTypeDSF_3.test_test_fileobj_delete ____________________

self =

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.11 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(288280699331753057467901551063121790263) to this test or run pytest with --hypothesis-seed=288280699331753057467901551063121790263 to reproduce this failure.
____________________ TFileTypeDSF_3.test_test_fileobj_load _____________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.07 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(232262847280926198999755875629347719060) to this test or run pytest with --hypothesis-seed=232262847280926198999755875629347719060 to reproduce this failure.
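Each "Captured stdout call" section above gives two equivalent ways to replay a failure deterministically. A sketch of the decorator form; 12345 is a placeholder value, not one of the seeds recorded in this log:

    from hypothesis import given, seed, strategies as st

    # @seed pins hypothesis's PRNG so the same example sequence is
    # regenerated on every run of this test.
    @seed(12345)
    @given(st.binary(max_size=64))
    def run(data):
        pass

    run()

Running pytest with --hypothesis-seed=12345 has the same effect without editing the test file.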
____________________ TFileTypeDSF_3.test_test_fileobj_save _____________________

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.47 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(140790411385163613884761551015576868327) to this test or run pytest with --hypothesis-seed=140790411385163613884761551015576868327 to reproduce this failure.
___________________ TFileTypeDSF_4.test_test_fileobj_delete ____________________

self =

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.30 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(4906424363025672396214231733283582154) to this test or run pytest with --hypothesis-seed=4906424363025672396214231733283582154 to reproduce this failure.
____________________ TFileTypeDSF_4.test_test_fileobj_load _____________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.36 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(5477075168676070697803376150416122832) to this test or run pytest with --hypothesis-seed=5477075168676070697803376150416122832 to reproduce this failure.
____________________ TFileTypeDSF_4.test_test_fileobj_save _____________________

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 1 valid examples in 1.61 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(159305765116580110466324438457517841620) to this test or run pytest with --hypothesis-seed=159305765116580110466324438457517841620 to reproduce this failure.
____________________ TFileTypeFLAC.test_test_fileobj_delete ____________________

self =

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.12 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(252201601122186897964473393006616889025) to this test or run pytest with --hypothesis-seed=252201601122186897964473393006616889025 to reproduce this failure.
_____________________ TFileTypeFLAC.test_test_fileobj_load _____________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.48 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(235700963112533103818834523545111385260) to this test or run pytest with --hypothesis-seed=235700963112533103818834523545111385260 to reproduce this failure.
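The health check's first suggestion is to shrink the generated examples themselves. A sketch of bounding a strategy's output with max_size; the 4096-byte cap is an arbitrary illustration, not a value from this log:

    from hypothesis import given, strategies as st

    # A hard cap on payload size bounds per-example generation cost,
    # which is what "Try decreasing size of the data" asks for; recursive
    # and collection strategies expose max_leaves for the same purpose.
    @given(st.binary(min_size=0, max_size=4096))
    def run(payload):
        pass

    run()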
_____________________ TFileTypeFLAC.test_test_fileobj_save _____________________

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.36 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(189227701104930912784691291473250929007) to this test or run pytest with --hypothesis-seed=189227701104930912784691291473250929007 to reproduce this failure.
________________ TFileTypeID3FileType.test_test_fileobj_delete _________________

self =

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.16 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(45926528481326480828045605332542307238) to this test or run pytest with --hypothesis-seed=45926528481326480828045605332542307238 to reproduce this failure.
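The Flaky reports in this log shrink to examples like _TestFileObj(fileobj, fail_after=i). That wrapper's implementation is not shown here; a minimal sketch of such a fault-injecting file object (class name, constructor, and behaviour assumed from the falsifying examples) might look like:

    import io

    class FailingFileObj:
        # Illustrative stand-in for the suite's _TestFileObj: allows a
        # fixed number of I/O calls to succeed, then raises, so parsers
        # get exercised against mid-stream failures.
        def __init__(self, fileobj, fail_after):
            self._fileobj = fileobj
            self._remaining = fail_after

        def _tick(self):
            if self._remaining <= 0:
                raise IOError("injected failure")
            self._remaining -= 1

        def read(self, size=-1):
            self._tick()
            return self._fileobj.read(size)

        def seek(self, offset, whence=0):
            self._tick()
            return self._fileobj.seek(offset, whence)

        def tell(self):
            self._tick()
            return self._fileobj.tell()

    # Two calls succeed; a third would raise the injected IOError.
    f = FailingFileObj(io.BytesIO(b"\x00" * 16), fail_after=2)
    f.read(4)
    f.tell()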
_________________ TFileTypeID3FileType.test_test_fileobj_load __________________ self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) function = .run at 0xffffffb0f29d00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects(h, self.KIND)) > def run(t): tests/test___init__.py:345: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 6721121.850549715, result = None finish = 6721122.172151294, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=321602) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 321.60ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:345: in run def run(t): _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 321.60ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 321.60ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, fail_after=i))(6), E ) E Unreliable test timings! 
On an initial run, this test took 321.60ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 19.64 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________ TFileTypeID3FileType.test_test_fileobj_save __________________ self = data = ConjectureData(INTERESTING, 3 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) function = .run at 0xffffffb0f294e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): tests/test___init__.py:358: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 6721139.192509198, result = None finish = 6721139.577981274, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=385472) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 385.47ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:358: in run def run(t): _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 3 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 385.47ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 385.47ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, stop_after=i))(0), E ) E Unreliable test timings! 
E   On an initial run, this test took 385.47ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 8.20 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
________________ TFileTypeID3FileType_2.test_test_fileobj_load _________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.09 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(230942562451348273491777021762170974565) to this test or run pytest with --hypothesis-seed=230942562451348273491777021762170974565 to reproduce this failure.
_______________ TFileTypeID3FileType_3.test_test_fileobj_delete ________________

self =

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.05 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(226624648346243456863659227708713695544) to this test or run pytest with --hypothesis-seed=226624648346243456863659227708713695544 to reproduce this failure.
________________ TFileTypeID3FileType_3.test_test_fileobj_load _________________

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.26 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g.
max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(35567533043147601021880031391820782427) to this test or run pytest with --hypothesis-seed=35567533043147601021880031391820782427 to reproduce this failure. ________________ TFileTypeID3FileType_3.test_test_fileobj_save _________________ self = data = ConjectureData(INTERESTING, 3 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
________________ TFileTypeID3FileType_3.test_test_fileobj_save _________________

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
    result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

runtime = datetime.timedelta(microseconds=303567)
current_deadline = datetime.timedelta(microseconds=250000)

            if runtime >= current_deadline:
>               raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 303.57ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

tests/test___init__.py:364: in test_test_fileobj_save
    run()
E   hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: run(
E       t=(lambda i: _TestFileObj(fileobj, stop_after=i))(0),
E   )
E   Unreliable test timings! On an initial run, this test took 303.57ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 9.79 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
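The Flaky report above proposes relaxing or disabling the per-example deadline. A minimal sketch of both options, assuming a placeholder test (the names and strategy are stand-ins, not mutagen's code):

    from datetime import timedelta
    from hypothesis import given, settings, strategies as st

    # Either raise the deadline well above the ~300ms spikes observed in this log...
    @settings(deadline=timedelta(milliseconds=1000))
    @given(st.binary())
    def test_save_with_generous_deadline(payload):
        pass

    # ...or, as the message suggests, turn it off entirely:
    @settings(deadline=None)
    @given(st.binary())
    def test_save_without_deadline(payload):
        pass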
________________ TFileTypeID3FileType_4.test_test_fileobj_load _________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(130343098694219197163275711170855227146) to this test or run pytest with --hypothesis-seed=130343098694219197163275711170855227146 to reproduce this failure.
____________________ TFileTypeMP3.test_test_fileobj_delete _____________________

>   run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
    result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

runtime = datetime.timedelta(microseconds=252340)
current_deadline = datetime.timedelta(microseconds=250000)

            if runtime >= current_deadline:
>               raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 252.34ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

tests/test___init__.py:378: in test_test_fileobj_delete
    run()
E   hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: run(
E       t=(lambda i: _TestFileObj(fileobj, stop_after=i))(0),
E   )
E   Unreliable test timings! On an initial run, this test took 252.34ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.66 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________ TFileTypeMP3.test_test_fileobj_load ______________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.32 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(299681641965808038890991875247390208320) to this test or run pytest with --hypothesis-seed=299681641965808038890991875247390208320 to reproduce this failure.
_____________________ TFileTypeMP3.test_test_fileobj_save ______________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.46 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(108506345889967710007040525781010923318) to this test or run pytest with --hypothesis-seed=108506345889967710007040525781010923318 to reproduce this failure.
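Each Captured-stdout note gives a seed for deterministic replay. Applied to a test, it would look roughly like this sketch (the seed value is copied from the log above; the test itself is a stand-in, not mutagen's):

    from hypothesis import given, seed, strategies as st

    @seed(108506345889967710007040525781010923318)  # seed reported for TFileTypeMP3.test_test_fileobj_save
    @given(st.binary())
    def test_fileobj_save_replay(payload):
        pass

Alternatively, without editing the test, the same replay can be requested on the command line with pytest --hypothesis-seed=108506345889967710007040525781010923318, as the message states.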
___________________ TFileTypeMP3_2.test_test_fileobj_delete ____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.12 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(9164855893857367485403115072451787861) to this test or run pytest with --hypothesis-seed=9164855893857367485403115072451787861 to reproduce this failure.
____________________ TFileTypeMP3_2.test_test_fileobj_load _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.16 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(115603252252868566677622955247918894555) to this test or run pytest with --hypothesis-seed=115603252252868566677622955247918894555 to reproduce this failure.
___________________ TFileTypeMP3_3.test_test_fileobj_delete ____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.03 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(93055807228514824042851468134761384624) to this test or run pytest with --hypothesis-seed=93055807228514824042851468134761384624 to reproduce this failure.
____________________ TFileTypeMP3_3.test_test_fileobj_load _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.07 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(6816705884099841066409873065570629396) to this test or run pytest with --hypothesis-seed=6816705884099841066409873065570629396 to reproduce this failure.
____________________ TFileTypeMP3_3.test_test_fileobj_save _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.20 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(333926674313701086057245965490165025158) to this test or run pytest with --hypothesis-seed=333926674313701086057245965490165025158 to reproduce this failure.
____________________ TFileTypeMP3_4.test_test_fileobj_load _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(252274415444904925115412232888396800345) to this test or run pytest with --hypothesis-seed=252274415444904925115412232888396800345 to reproduce this failure.
____________________ TFileTypeMP4.test_test_fileobj_delete _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.16 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(327659442385628359938482190920297747262) to this test or run pytest with --hypothesis-seed=327659442385628359938482190920297747262 to reproduce this failure.
_____________________ TFileTypeMP4.test_test_fileobj_load ______________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.32 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(340095593858687554970149087852195436809) to this test or run pytest with --hypothesis-seed=340095593858687554970149087852195436809 to reproduce this failure.
_____________________ TFileTypeMP4.test_test_fileobj_save ______________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.11 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(314482277867538775323537437170068391056) to this test or run pytest with --hypothesis-seed=314482277867538775323537437170068391056 to reproduce this failure.
____________________ TFileTypeMP4_2.test_test_fileobj_load _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.32 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(68690314937318529873416270112330901654) to this test or run pytest with --hypothesis-seed=68690314937318529873416270112330901654 to reproduce this failure.
____________________ TFileTypeMP4_2.test_test_fileobj_save _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 2.74 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(36760964525009606968982680310264748117) to this test or run pytest with --hypothesis-seed=36760964525009606968982680310264748117 to reproduce this failure.
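The other remedy the health-check text proposes is to shrink what the strategies generate (max_size, max_leaves). A rough illustration with stock strategies, assuming placeholder tests rather than the generate_test_file_objects helper used here:

    from hypothesis import given, strategies as st

    small_blobs = st.binary(max_size=256)                           # cap byte-string size
    shallow = st.recursive(st.integers(), st.lists, max_leaves=8)   # cap recursive width

    @given(small_blobs, shallow)
    def test_with_bounded_inputs(blob, tree):
        pass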
____________________ TFileTypeMP4_3.test_test_fileobj_load _____________________

>   run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
    result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

runtime = datetime.timedelta(microseconds=323952)
current_deadline = datetime.timedelta(microseconds=250000)

            if runtime >= current_deadline:
>               raise DeadlineExceeded(runtime, self.settings.deadline)
E   hypothesis.errors.DeadlineExceeded: Test took 323.95ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

tests/test___init__.py:351: in test_test_fileobj_load
    run()
E   hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: run(
E       t=(lambda i: _TestFileObj(fileobj, fail_after=i))(0),
E   )
E   Unreliable test timings! On an initial run, this test took 323.95ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.04 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
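The wrapper shown in these tracebacks also explains why the exception fires near 250ms when the configured deadline is 200ms: on non-final runs Hypothesis enforces the deadline with a 25% grace margin, computed as (deadline // 4) * 5. A small sketch with the values from this log:

    from datetime import timedelta

    deadline = timedelta(milliseconds=200)    # settings.deadline seen in the log
    current_deadline = (deadline // 4) * 5    # grace applied on non-final runs
    assert current_deadline == timedelta(milliseconds=250)
    # 323.95ms >= 250ms, so DeadlineExceeded is raised on the first run;
    # the final replay took 1.04ms, hence the Flaky "unreliable timings" report.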
____________________ TFileTypeMP4_3.test_test_fileobj_save _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.16 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(31141862727007249335466999341332934612) to this test or run pytest with --hypothesis-seed=31141862727007249335466999341332934612 to reproduce this failure.
___________________ TFileTypeMP4_4.test_test_fileobj_delete ____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 3.48 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(204235666887673608064241724458792652901) to this test or run pytest with --hypothesis-seed=204235666887673608064241724458792652901 to reproduce this failure.
____________________ TFileTypeMP4_4.test_test_fileobj_load _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(316748429021185265302921422531039392360) to this test or run pytest with --hypothesis-seed=316748429021185265302921422531039392360 to reproduce this failure.
____________________ TFileTypeMP4_4.test_test_fileobj_save _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.53 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(240995566441020989466466495639913490245) to this test or run pytest with --hypothesis-seed=240995566441020989466466495639913490245 to reproduce this failure.
___________________ TFileTypeMP4_5.test_test_fileobj_delete ____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.20 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(61956322682264197020735890366699977332) to this test or run pytest with --hypothesis-seed=61956322682264197020735890366699977332 to reproduce this failure.
____________________ TFileTypeMP4_5.test_test_fileobj_load _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.09 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(334407592730097248698264884550167506376) to this test or run pytest with --hypothesis-seed=334407592730097248698264884550167506376 to reproduce this failure.
____________________ TFileTypeMP4_5.test_test_fileobj_save _____________________

>   run()
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.08 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(268228315144376025879658039952072887281) to this test or run pytest with --hypothesis-seed=268228315144376025879658039952072887281 to reproduce this failure.
___________________ TFileTypeMP4_6.test_test_fileobj_delete ____________________

>   run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.12/site-packages/hypothesis/core.py:850: in _execute_once_for_engine
    result = self.execute_once(data)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) function = .run at 0xffffffb0def9c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 3 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) tests/test___init__.py:371: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 6721504.381021949, result = None finish = 6721504.655363147, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=274341) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 274.34ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:371: in run h, lambda t: o.delete(fileobj=t))) _ _ _ _ _ _ _ _ _ _ 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 3 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 274.34ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 274.34ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. 
If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, fail_after=i))(0), E ) E Unreliable test timings! On an initial run, this test took 274.34ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.65 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ____________________ TFileTypeMP4_6.test_test_fileobj_load _____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.26 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(110501646223088217917815415430394455073) to this test or run pytest with --hypothesis-seed=110501646223088217917815415430394455073 to reproduce this failure. ____________________ TFileTypeMP4_6.test_test_fileobj_save _____________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.35 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. 
tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(136393860327461033514125994037007725137) to this test or run pytest with --hypothesis-seed=136393860327461033514125994037007725137 to reproduce this failure. _________________ TFileTypeMonkeysAudio.test_test_fileobj_load _________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.35 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(156742855336454575253694814059955740256) to this test or run pytest with --hypothesis-seed=156742855336454575253694814059955740256 to reproduce this failure. ________________ TFileTypeMonkeysAudio_2.test_test_fileobj_load ________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.05 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(117319755811668251010259607550783959190) to this test or run pytest with --hypothesis-seed=117319755811668251010259607550783959190 to reproduce this failure. __________________ TFileTypeMusepack.test_test_fileobj_delete __________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). 
E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(201769987429345058463193554223719635862) to this test or run pytest with --hypothesis-seed=201769987429345058463193554223719635862 to reproduce this failure. ___________________ TFileTypeMusepack.test_test_fileobj_load ___________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.30 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(266693614467724668685119758670519462351) to this test or run pytest with --hypothesis-seed=266693614467724668685119758670519462351 to reproduce this failure. __________________ TFileTypeMusepack_2.test_test_fileobj_load __________________ self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. 
Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) function = .run at 0xffffffb0dedc60> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects(h, self.KIND)) > def run(t): tests/test___init__.py:345: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 6721598.236316026, result = None finish = 6721598.751042523, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=514726) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 514.73ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:345: in run def run(t): _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 514.73ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 514.73ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, stop_after=i))(17), E ) E Unreliable test timings! 
On an initial run, this test took 514.73ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.78 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky _________________ TFileTypeMusepack_3.test_test_fileobj_delete _________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.04 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(225229184948906437541524477927099592118) to this test or run pytest with --hypothesis-seed=225229184948906437541524477927099592118 to reproduce this failure. __________________ TFileTypeMusepack_3.test_test_fileobj_load __________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.03 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(327917076941141730407212313000628633718) to this test or run pytest with --hypothesis-seed=327917076941141730407212313000628633718 to reproduce this failure. 
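Every Flaky failure in this log has the same shape: the first execution of an example exceeds the 200.00ms deadline and raises DeadlineExceeded, the replay of the shrunk example finishes in a millisecond or two, and Hypothesis reports the mismatch as unreliable timings. The remedy the message itself proposes is deadline=None. A minimal sketch, with a hypothetical test body standing in for mutagen's I/O-heavy save and delete round-trips:

    # Sketch only: the strategy and assertion are illustrative placeholders.
    from hypothesis import given, settings
    from hypothesis import strategies as st

    @settings(deadline=None)  # no per-example deadline, so no DeadlineExceeded
    @given(st.integers(min_value=0, max_value=10**6))
    def test_tolerates_timing_variance(n):
        # A body that takes 300ms under load and 1ms on replay can no longer
        # be reported as Flaky, because the deadline check is disabled.
        assert n >= 0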
__________________ TFileTypeMusepack_3.test_test_fileobj_save __________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(205352197357357312161561825407541038553) to this test or run pytest with --hypothesis-seed=205352197357357312161561825407541038553 to reproduce this failure. __________________ TFileTypeMusepack_4.test_test_fileobj_load __________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.10 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(110208713070202288431777647409078222675) to this test or run pytest with --hypothesis-seed=110208713070202288431777647409078222675 to reproduce this failure. __________________ TFileTypeOggFLAC.test_test_fileobj_delete ___________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.07 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. 
tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(280858384242202470798626906939674354903) to this test or run pytest with --hypothesis-seed=280858384242202470798626906939674354903 to reproduce this failure. ___________________ TFileTypeOggFLAC.test_test_fileobj_load ____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.21 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(313610907749613582099339795939672053932) to this test or run pytest with --hypothesis-seed=313610907749613582099339795939672053932 to reproduce this failure. ___________________ TFileTypeOggFLAC.test_test_fileobj_save ____________________ self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. 
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) function = .run at 0xffffffb0e36c00> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): tests/test___init__.py:358: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 6721672.677007732, result = None finish = 6721672.95658843, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=279581) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 279.58ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:358: in run def run(t): _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 279.58ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 279.58ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, fail_after=i))(0), E ) E Unreliable test timings! 
On an initial run, this test took 279.58ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.68 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky __________________ TFileTypeOggOpus.test_test_fileobj_delete ___________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.46 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(177026900177983435449401864451586997298) to this test or run pytest with --hypothesis-seed=177026900177983435449401864451586997298 to reproduce this failure. ___________________ TFileTypeOggOpus.test_test_fileobj_load ____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.49 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(94626549783663932709661167219814595706) to this test or run pytest with --hypothesis-seed=94626549783663932709661167219814595706 to reproduce this failure. 
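Each "Captured stdout call" block offers two equivalent ways to replay the failing example: an @seed decorator on the test, or a --hypothesis-seed option to pytest. A sketch using the seed reported above for TFileTypeOggOpus.test_test_fileobj_load; the test body is a placeholder, not mutagen's real test.

    # Sketch only: the seed value is copied from the log output above.
    from hypothesis import given, seed
    from hypothesis import strategies as st

    @seed(94626549783663932709661167219814595706)
    @given(st.binary(max_size=64))
    def test_replays_reported_failure(data):
        assert isinstance(data, bytes)

The command-line equivalent, per the same log lines, would be:

    pytest --hypothesis-seed=94626549783663932709661167219814595706 tests/test___init__.py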
___________________ TFileTypeOggOpus.test_test_fileobj_save ____________________
self =
    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(129755788031451601701450858867057237639) to this test or run pytest with --hypothesis-seed=129755788031451601701450858867057237639 to reproduce this failure.
__________________ TFileTypeOggSpeex.test_test_fileobj_delete __________________
self =
    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.45 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(226985208363992358251476759264499703892) to this test or run pytest with --hypothesis-seed=226985208363992358251476759264499703892 to reproduce this failure.
___________________ TFileTypeOggSpeex.test_test_fileobj_load ___________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.52 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(284702854676882157681839228050926626903) to this test or run pytest with --hypothesis-seed=284702854676882157681839228050926626903 to reproduce this failure.
___________________ TFileTypeOggSpeex.test_test_fileobj_save ___________________
self =
    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.54 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(82960593850766725721172053507842696512) to this test or run pytest with --hypothesis-seed=82960593850766725721172053507842696512 to reproduce this failure.
_________________ TFileTypeOggTheora.test_test_fileobj_delete __________________
self =
    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.12 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(311614670691681989289513314735830560576) to this test or run pytest with --hypothesis-seed=311614670691681989289513314735830560576 to reproduce this failure.
__________________ TFileTypeOggTheora.test_test_fileobj_load ___________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.34 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(144569556423336950540036358826002040265) to this test or run pytest with --hypothesis-seed=144569556423336950540036358826002040265 to reproduce this failure.
__________________ TFileTypeOggTheora.test_test_fileobj_save ___________________
self =
    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(154706898832327529476467199332988264895) to this test or run pytest with --hypothesis-seed=154706898832327529476467199332988264895 to reproduce this failure.
_________________ TFileTypeOggVorbis.test_test_fileobj_delete __________________
self =
    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.36 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(195923185919323959229759925225018562791) to this test or run pytest with --hypothesis-seed=195923185919323959229759925225018562791 to reproduce this failure.
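
Each FailedHealthCheck ends with a reproduction hint in the captured stdout. There are two equivalent ways to replay the reported run, shown here with the seed from the TFileTypeOggVorbis failure directly above (the strategy is again a stand-in, not mutagen's actual one):

    from hypothesis import given, seed, strategies as st

    # Pin Hypothesis's randomness to the seed printed in the log.
    @seed(195923185919323959229759925225018562791)
    @given(st.binary())
    def run(data):
        pass  # placeholder body

    run()

Alternatively, without editing the test, the same seed can be supplied on the command line through Hypothesis's pytest plugin:

    python3 -m pytest tests/test___init__.py --hypothesis-seed=195923185919323959229759925225018562791
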
__________________ TFileTypeOggVorbis.test_test_fileobj_load ___________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.17 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(9727890339980491087667782785044897266) to this test or run pytest with --hypothesis-seed=9727890339980491087667782785044897266 to reproduce this failure.
__________________ TFileTypeOggVorbis.test_test_fileobj_save ___________________
self =
    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(287077171472024457013395798885397591788) to this test or run pytest with --hypothesis-seed=287077171472024457013395798885397591788 to reproduce this failure.
__________________ TFileTypeOptimFROG.test_test_fileobj_load ___________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(309767150112403470459283342126549923749) to this test or run pytest with --hypothesis-seed=309767150112403470459283342126549923749 to reproduce this failure.
_________________ TFileTypeOptimFROG_2.test_test_fileobj_load __________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 1 valid examples in 2.23 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(30067580410004324418266556714609871031) to this test or run pytest with --hypothesis-seed=30067580410004324418266556714609871031 to reproduce this failure.
____________________ TFileTypeSMF.test_test_fileobj_delete _____________________
self =
    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(333849699632576897724491268504970718699) to this test or run pytest with --hypothesis-seed=333849699632576897724491268504970718699 to reproduce this failure.
_____________________ TFileTypeSMF.test_test_fileobj_load ______________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.33 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(73209609767570483331592600379441378156) to this test or run pytest with --hypothesis-seed=73209609767570483331592600379441378156 to reproduce this failure.
_____________________ TFileTypeSMF.test_test_fileobj_save ______________________
self =
    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.34 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(273107721873706897769482782160355891464) to this test or run pytest with --hypothesis-seed=273107721873706897769482782160355891464 to reproduce this failure.
_____________________ TFileTypeTAK.test_test_fileobj_load ______________________
self =
data = ConjectureData(INTERESTING, 2 bytes, frozen)
    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.
        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None
    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.
        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.
        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find
        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:
            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result
        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)
                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")
                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)
        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 2 bytes, frozen)
function = .run at 0xffffffb0e66de0>
    def default_new_style_executor(data, function):
>       return function(data)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 2 bytes, frozen)
    def run(data):
        # Set up dynamic context needed by a single test run.
        with local_settings(self.settings):
            with deterministic_PRNG():
                with BuildContext(data, is_final=is_final) as context:
                    if self.stuff.selfy is not None:
                        data.hypothesis_runner = self.stuff.selfy
                    # Generate all arguments to the test function.
                    args = self.stuff.args
                    kwargs = dict(self.stuff.kwargs)
                    if example_kwargs is None:
                        a, kw, argslices = context.prep_args_kwargs_from_strategies(
                            (), self.stuff.given_kwargs
                        )
                        assert not a, "strategies all moved to kwargs by now"
                    else:
                        kw = example_kwargs
                        argslices = {}
                    kwargs.update(kw)
                    if expected_failure is not None:
                        nonlocal text_repr
                        text_repr = repr_call(test, args, kwargs)
                    if print_example or current_verbosity() >= Verbosity.verbose:
                        printer = RepresentationPrinter(context=context)
                        if print_example:
                            printer.text("Falsifying example:")
                        else:
                            printer.text("Trying example:")
                        if self.print_given_args:
                            printer.text(" ")
                            printer.repr_call(
                                test.__name__,
                                args,
                                kwargs,
                                force_split=True,
                                arg_slices=argslices,
                                leading_comment=(
                                    "# " + context.data.slice_comments[(0, 0)]
                                    if (0, 0) in context.data.slice_comments
                                    else None
                                ),
                            )
                        report(printer.getvalue())
>                   return test(*args, **kwargs)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
t =
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
tests/test___init__.py:345:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = (,)
kwargs = {}, initial_draws = 1, start = 6721885.44521322, result = None
finish = 6721885.759776818, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=314564)
current_deadline = datetime.timedelta(microseconds=250000)
    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 314.56ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
The above exception was the direct cause of the following exception:
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:345: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 314.56ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 314.56ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None
    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.
        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.
        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find
        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:
            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result
        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)
                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")
                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)
        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)
        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: run(
E               t=(lambda i: _TestFileObj(fileobj, fail_after=i))(37),
E           )
E           Unreliable test timings!
E           On an initial run, this test took 314.56ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 5.81 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________ TFileTypeTAK_2.test_test_fileobj_delete ____________________
self =
    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.48 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(43868841410032698205787833146201293978) to this test or run pytest with --hypothesis-seed=43868841410032698205787833146201293978 to reproduce this failure.
____________________ TFileTypeTAK_2.test_test_fileobj_load _____________________
self =
data = ConjectureData(INTERESTING, 2 bytes, frozen)
    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.
        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None
    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.
        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.
        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find
        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:
            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result
        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)
                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")
                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)
        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
>       result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 2 bytes, frozen)
function = .run at 0xffffffb0d41080>
    def default_new_style_executor(data, function):
>       return function(data)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
data = ConjectureData(INTERESTING, 2 bytes, frozen)
    def run(data):
        # Set up dynamic context needed by a single test run.
        with local_settings(self.settings):
            with deterministic_PRNG():
                with BuildContext(data, is_final=is_final) as context:
                    if self.stuff.selfy is not None:
                        data.hypothesis_runner = self.stuff.selfy
                    # Generate all arguments to the test function.
                    args = self.stuff.args
                    kwargs = dict(self.stuff.kwargs)
                    if example_kwargs is None:
                        a, kw, argslices = context.prep_args_kwargs_from_strategies(
                            (), self.stuff.given_kwargs
                        )
                        assert not a, "strategies all moved to kwargs by now"
                    else:
                        kw = example_kwargs
                        argslices = {}
                    kwargs.update(kw)
                    if expected_failure is not None:
                        nonlocal text_repr
                        text_repr = repr_call(test, args, kwargs)
                    if print_example or current_verbosity() >= Verbosity.verbose:
                        printer = RepresentationPrinter(context=context)
                        if print_example:
                            printer.text("Falsifying example:")
                        else:
                            printer.text("Trying example:")
                        if self.print_given_args:
                            printer.text(" ")
                            printer.repr_call(
                                test.__name__,
                                args,
                                kwargs,
                                force_split=True,
                                arg_slices=argslices,
                                leading_comment=(
                                    "# " + context.data.slice_comments[(0, 0)]
                                    if (0, 0) in context.data.slice_comments
                                    else None
                                ),
                            )
                        report(printer.getvalue())
>                   return test(*args, **kwargs)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
t =
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
tests/test___init__.py:345:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = (,)
kwargs = {}, initial_draws = 1, start = 6721919.670242493, result = None
finish = 6721920.160938489, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=490696)
current_deadline = datetime.timedelta(microseconds=250000)
    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 490.70ms, which exceeds the deadline of 200.00ms
/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded
The above exception was the direct cause of the following exception:
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:345: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 490.70ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 490.70ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None
    def execute_once(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.
        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.
        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """
        self.ever_executed = True
        data.is_find = self.is_find
        text_repr = None
        if self.settings.deadline is None:
            test = self.test
        else:
            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = time.perf_counter()
                result = self.test(*args, **kwargs)
                finish = time.perf_counter()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = datetime.timedelta(
                    seconds=finish - start - internal_draw_time
                )
                self.__test_runtime = runtime
                current_deadline = self.settings.deadline
                if not is_final:
                    current_deadline = (current_deadline // 4) * 5
                if runtime >= current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result
        def run(data):
            # Set up dynamic context needed by a single test run.
            with local_settings(self.settings):
                with deterministic_PRNG():
                    with BuildContext(data, is_final=is_final) as context:
                        if self.stuff.selfy is not None:
                            data.hypothesis_runner = self.stuff.selfy
                        # Generate all arguments to the test function.
                        args = self.stuff.args
                        kwargs = dict(self.stuff.kwargs)
                        if example_kwargs is None:
                            a, kw, argslices = context.prep_args_kwargs_from_strategies(
                                (), self.stuff.given_kwargs
                            )
                            assert not a, "strategies all moved to kwargs by now"
                        else:
                            kw = example_kwargs
                            argslices = {}
                        kwargs.update(kw)
                        if expected_failure is not None:
                            nonlocal text_repr
                            text_repr = repr_call(test, args, kwargs)
                        if print_example or current_verbosity() >= Verbosity.verbose:
                            printer = RepresentationPrinter(context=context)
                            if print_example:
                                printer.text("Falsifying example:")
                            else:
                                printer.text("Trying example:")
                            if self.print_given_args:
                                printer.text(" ")
                                printer.repr_call(
                                    test.__name__,
                                    args,
                                    kwargs,
                                    force_split=True,
                                    arg_slices=argslices,
                                    leading_comment=(
                                        "# " + context.data.slice_comments[(0, 0)]
                                        if (0, 0) in context.data.slice_comments
                                        else None
                                    ),
                                )
                            report(printer.getvalue())
                        return test(*args, **kwargs)
        # Run the test function once, via the executor hook.
        # In most cases this will delegate straight to `run(data)`.
        result = self.test_runner(data, run)
        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (
                isinstance(exception, DeadlineExceeded)
                and self.__test_runtime is not None
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    "test took %.2fms, which exceeded the deadline of "
                    "%.2fms, but on a subsequent run it took %.2f ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                    % (
                        exception.runtime.total_seconds() * 1000,
                        self.settings.deadline.total_seconds() * 1000,
                        self.__test_runtime.total_seconds() * 1000,
                    )
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: run(
E               t=(lambda i: _TestFileObj(fileobj, stop_after=i))(54),
E           )
E           Unreliable test timings!
E           On an initial run, this test took 490.70ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.28 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.
/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
____________________ TFileTypeTAK_2.test_test_fileobj_save _____________________
self =
    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.35 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(316215161806243099840715641852448954645) to this test or run pytest with --hypothesis-seed=316215161806243099840715641852448954645 to reproduce this failure.
__________________ TFileTypeTrueAudio.test_test_fileobj_load ___________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(177014801693355688408940992951646207768) to this test or run pytest with --hypothesis-seed=177014801693355688408940992951646207768 to reproduce this failure.
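
The TFileTypeTAK and TFileTypeTAK_2 failures above are a different species from the health-check ones: an example exceeded the 200ms per-example deadline on its first execution (314.56ms and 490.70ms) but finished in a few milliseconds on replay, so Hypothesis raised Flaky for unreliable timings rather than reporting a genuine bug. The report's own recommendation is to turn the deadline off for such timing-variable tests; a minimal sketch of that recommendation (placeholder strategy and body, not mutagen's actual test):

    from hypothesis import given, settings, strategies as st

    # deadline=None removes the per-example time limit, so a slow first
    # execution (cold cache, loaded build machine) can no longer be
    # reported as Flaky/DeadlineExceeded.
    @settings(deadline=None)
    @given(st.binary())
    def run(data):
        pass  # placeholder body

    run()
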
____________________ TFileTypeWAVE.test_test_fileobj_delete ____________________
self =
    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.35 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(289196172181063912993755566541328657179) to this test or run pytest with --hypothesis-seed=289196172181063912993755566541328657179 to reproduce this failure.
_____________________ TFileTypeWAVE.test_test_fileobj_load _____________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.11 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(99219113655519680321720068126764093608) to this test or run pytest with --hypothesis-seed=99219113655519680321720068126764093608 to reproduce this failure.
_____________________ TFileTypeWAVE.test_test_fileobj_save _____________________
self =
    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(274533922225623099265206684335041807821) to this test or run pytest with --hypothesis-seed=274533922225623099265206684335041807821 to reproduce this failure.
____________________ TFileTypeWAVE_2.test_test_fileobj_load ____________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.26 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(89793891186478683151799888371972945746) to this test or run pytest with --hypothesis-seed=89793891186478683151799888371972945746 to reproduce this failure.
___________________ TFileTypeWAVE_3.test_test_fileobj_delete ___________________
self =
    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.38 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(2549472486777030376092810686511057158) to this test or run pytest with --hypothesis-seed=2549472486777030376092810686511057158 to reproduce this failure.
____________________ TFileTypeWAVE_3.test_test_fileobj_load ____________________
self =
    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.29 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(237119945633121093885059365792292253227) to this test or run pytest with --hypothesis-seed=237119945633121093885059365792292253227 to reproduce this failure.
____________________ TFileTypeWAVE_3.test_test_fileobj_save ____________________
self =
    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.37 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(95469287509276137253420212909535255476) to this test or run pytest with --hypothesis-seed=95469287509276137253420212909535255476 to reproduce this failure.
__________________ TFileTypeWavPack.test_test_fileobj_delete ___________________
self =
    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()
tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.33 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(315754491717625394447552533664318008208) to this test or run pytest with --hypothesis-seed=315754491717625394447552533664318008208 to reproduce this failure.
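
Since dozens of tests fail the same way on this builder, both knobs could plausibly be applied suite-wide instead of per test, for example from a conftest.py via a Hypothesis settings profile. A sketch under that assumption (the profile name "slow-builder" is invented here; mutagen's suite may configure Hypothesis elsewhere):

    from hypothesis import HealthCheck, settings

    # Register a profile for slow, timing-noisy environments such as the
    # emulated noarch build seen in this log, then make it the active one.
    settings.register_profile(
        "slow-builder",
        deadline=None,
        suppress_health_check=[HealthCheck.too_slow],
    )
    settings.load_profile("slow-builder")
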
___________________ TFileTypeWavPack.test_test_fileobj_load ____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.28 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(145651857482802614106535407967981926813) to this test or run pytest with --hypothesis-seed=145651857482802614106535407967981926813 to reproduce this failure. ___________________ TFileTypeWavPack.test_test_fileobj_save ____________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.20 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(28515094734472514572927158111676784990) to this test or run pytest with --hypothesis-seed=28515094734472514572927158111676784990 to reproduce this failure. ___________________________ TFile.test_mock_fileobj ____________________________ self = def test_mock_fileobj(self): for filename in self.filenames: with open(filename, "rb") as h: @given(generate_test_file_objects(h, File)) def run(t): try: File(t) except MutagenError: pass > run() tests/test___init__.py:623: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, File)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.09 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. 
tests/test___init__.py:617: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(68458133148947342119856342421693866449) to this test or run pytest with --hypothesis-seed=68458133148947342119856342421693866449 to reproduce this failure.
=========================== short test summary info ============================
FAILED tests/test___init__.py::TFileTypeAAC::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeAAC_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeAC3::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeAC3::test_test_fileobj_save - hypothe...
FAILED tests/test___init__.py::TFileTypeAC3_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeAC3_2::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeAIFF::test_test_fileobj_delete - hypo...
FAILED tests/test___init__.py::TFileTypeAIFF::test_test_fileobj_load - hypoth...
FAILED tests/test___init__.py::TFileTypeAIFF::test_test_fileobj_save - hypoth...
FAILED tests/test___init__.py::TFileTypeAIFF_2::test_test_fileobj_delete - hy...
FAILED tests/test___init__.py::TFileTypeAIFF_2::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeAIFF_3::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeAIFF_4::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeAIFF_5::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeAIFF_6::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeAIFF_6::test_test_fileobj_save - hypo...
FAILED tests/test___init__.py::TFileTypeAPEv2File::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeAPEv2File::test_test_fileobj_load - h...
FAILED tests/test___init__.py::TFileTypeAPEv2File::test_test_fileobj_save - h...
FAILED tests/test___init__.py::TFileTypeAPEv2File_2::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeASF::test_test_fileobj_delete - hypot...
FAILED tests/test___init__.py::TFileTypeASF::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeASF::test_test_fileobj_save - hypothe...
FAILED tests/test___init__.py::TFileTypeASF_2::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeASF_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeASF_2::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeASF_3::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeASF_3::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeASF_3::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeDSDIFF::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeDSDIFF_2::test_test_fileobj_load - hy...
FAILED tests/test___init__.py::TFileTypeDSDIFF_3::test_test_fileobj_delete - ...
FAILED tests/test___init__.py::TFileTypeDSDIFF_3::test_test_fileobj_load - hy...
FAILED tests/test___init__.py::TFileTypeDSDIFF_3::test_test_fileobj_save - hy...
FAILED tests/test___init__.py::TFileTypeDSF::test_test_fileobj_delete - hypot...
FAILED tests/test___init__.py::TFileTypeDSF::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeDSF_2::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeDSF_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeDSF_2::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeDSF_3::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeDSF_3::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeDSF_3::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeDSF_4::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeDSF_4::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeDSF_4::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeFLAC::test_test_fileobj_delete - hypo...
FAILED tests/test___init__.py::TFileTypeFLAC::test_test_fileobj_load - hypoth...
FAILED tests/test___init__.py::TFileTypeFLAC::test_test_fileobj_save - hypoth...
FAILED tests/test___init__.py::TFileTypeID3FileType::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeID3FileType::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeID3FileType::test_test_fileobj_save
FAILED tests/test___init__.py::TFileTypeID3FileType_2::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeID3FileType_3::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeID3FileType_3::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeID3FileType_3::test_test_fileobj_save
FAILED tests/test___init__.py::TFileTypeID3FileType_4::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeMP3::test_test_fileobj_delete - hypot...
FAILED tests/test___init__.py::TFileTypeMP3::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeMP3::test_test_fileobj_save - hypothe...
FAILED tests/test___init__.py::TFileTypeMP3_2::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeMP3_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP3_3::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeMP3_3::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP3_3::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMP3_4::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4::test_test_fileobj_delete - hypot...
FAILED tests/test___init__.py::TFileTypeMP4::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeMP4::test_test_fileobj_save - hypothe...
FAILED tests/test___init__.py::TFileTypeMP4_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_2::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_3::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_3::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_4::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeMP4_4::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_4::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_5::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeMP4_5::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_5::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_6::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeMP4_6::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_6::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMonkeysAudio::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeMonkeysAudio_2::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeMusepack::test_test_fileobj_delete - ...
FAILED tests/test___init__.py::TFileTypeMusepack::test_test_fileobj_load - hy...
FAILED tests/test___init__.py::TFileTypeMusepack_2::test_test_fileobj_load - ...
FAILED tests/test___init__.py::TFileTypeMusepack_3::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeMusepack_3::test_test_fileobj_load - ...
FAILED tests/test___init__.py::TFileTypeMusepack_3::test_test_fileobj_save - ...
FAILED tests/test___init__.py::TFileTypeMusepack_4::test_test_fileobj_load - ...
FAILED tests/test___init__.py::TFileTypeOggFLAC::test_test_fileobj_delete - h...
FAILED tests/test___init__.py::TFileTypeOggFLAC::test_test_fileobj_load - hyp...
FAILED tests/test___init__.py::TFileTypeOggFLAC::test_test_fileobj_save - hyp...
FAILED tests/test___init__.py::TFileTypeOggOpus::test_test_fileobj_delete - h...
FAILED tests/test___init__.py::TFileTypeOggOpus::test_test_fileobj_load - hyp...
FAILED tests/test___init__.py::TFileTypeOggOpus::test_test_fileobj_save - hyp...
FAILED tests/test___init__.py::TFileTypeOggSpeex::test_test_fileobj_delete - ...
FAILED tests/test___init__.py::TFileTypeOggSpeex::test_test_fileobj_load - hy...
FAILED tests/test___init__.py::TFileTypeOggSpeex::test_test_fileobj_save - hy...
FAILED tests/test___init__.py::TFileTypeOggTheora::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeOggTheora::test_test_fileobj_load - h...
FAILED tests/test___init__.py::TFileTypeOggTheora::test_test_fileobj_save - h...
FAILED tests/test___init__.py::TFileTypeOggVorbis::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeOggVorbis::test_test_fileobj_load - h...
FAILED tests/test___init__.py::TFileTypeOggVorbis::test_test_fileobj_save - h...
FAILED tests/test___init__.py::TFileTypeOptimFROG::test_test_fileobj_load - h...
FAILED tests/test___init__.py::TFileTypeOptimFROG_2::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeSMF::test_test_fileobj_delete - hypot...
FAILED tests/test___init__.py::TFileTypeSMF::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeSMF::test_test_fileobj_save - hypothe...
FAILED tests/test___init__.py::TFileTypeTAK::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeTAK_2::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeTAK_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeTAK_2::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeTrueAudio::test_test_fileobj_load - h...
FAILED tests/test___init__.py::TFileTypeWAVE::test_test_fileobj_delete - hypo...
FAILED tests/test___init__.py::TFileTypeWAVE::test_test_fileobj_load - hypoth...
FAILED tests/test___init__.py::TFileTypeWAVE::test_test_fileobj_save - hypoth...
FAILED tests/test___init__.py::TFileTypeWAVE_2::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeWAVE_3::test_test_fileobj_delete - hy...
FAILED tests/test___init__.py::TFileTypeWAVE_3::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeWAVE_3::test_test_fileobj_save - hypo...
FAILED tests/test___init__.py::TFileTypeWavPack::test_test_fileobj_delete - h...
FAILED tests/test___init__.py::TFileTypeWavPack::test_test_fileobj_load - hyp...
FAILED tests/test___init__.py::TFileTypeWavPack::test_test_fileobj_save - hyp...
FAILED tests/test___init__.py::TFile::test_mock_fileobj - hypothesis.errors.F...
================ 126 failed, 3907 passed in 7372.96s (2:02:52) =================
error: Bad exit status from /var/tmp/rpm-tmp.dT0Exv (%check)

RPM build errors:
    Bad exit status from /var/tmp/rpm-tmp.dT0Exv (%check)
Child return code was: 1
EXCEPTION: [Error('Command failed: \n # bash --login -c /usr/bin/rpmbuild -bb --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec\n', 1)]
Traceback (most recent call last):
  File "/usr/lib/python3.11/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
             ^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mockbuild/util.py", line 597, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed:
 # bash --login -c /usr/bin/rpmbuild -bb --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec
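[The log itself points at two remedies: every "Captured stdout call" block prints a seed for deterministic replay, and every FailedHealthCheck names suppress_health_check. A sketch of applying the suppression suite-wide through a Hypothesis settings profile in the test suite's conftest.py; the profile name and placement are assumptions, not taken from the spec file or this build:]

    # Hypothetical conftest.py: register and load a profile that
    # suppresses only the too_slow health check, leaving other health
    # checks and all test logic untouched on slow mock/koji builders.
    from hypothesis import HealthCheck, settings

    settings.register_profile(
        "slow-builder", suppress_health_check=[HealthCheck.too_slow])
    settings.load_profile("slow-builder")

[An individual failure can then be replayed with the seed the log prints, e.g.:]

    pytest tests/test___init__.py::TFile::test_mock_fileobj --hypothesis-seed=68458133148947342119856342421693866449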